content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_height_of_objects(tet_file):
    """Return the height of the test object.

    Reads vertex lines of the form ``v x y z`` from a mesh file and
    returns ``2 * abs(min(z))``.

    :param tet_file: path to the mesh file
    :return: height as a float
    :raises ValueError: if the file contains no vertex lines (min of empty)
    """
    zs = []
    # Use a context manager so the file handle is closed deterministically
    # (the original `list(open(...))` leaked the handle).
    with open(tet_file, "r") as fh:
        for line in fh:
            sp = line.strip('\n').split(" ")
            if sp[0] == 'v':
                zs.append(float(sp[3]))
    return 2 * abs(min(zs))
|
9e763feb24a774cdcbc4c0a1ee13a8f0a4253989
| 57,001
|
def add_element_to_dict(dictionary, element):
    """Count *element* in *dictionary*, adding the key if it is new.

    Single-character elements are deliberately ignored (left uncounted).

    :param dictionary: dict mapping element -> occurrence count
    :param element: string
    :return: the same (updated) dictionary
    """
    if len(element) == 1:
        return dictionary
    # dict.get replaces the `in dictionary.keys()` double lookup.
    dictionary[element] = dictionary.get(element, 0) + 1
    return dictionary
|
d55e0124e9b940b3cfcbf94674a2009ae46b1f55
| 57,005
|
def generate_range(min: int, max: int, step: int) -> list:
    """Generate the integers from *min* to *max* inclusive, with *step*.

    Note: the parameter names shadow the builtins `min`/`max`; they are kept
    for interface compatibility with existing callers.
    """
    # list(range(...)) replaces the identity comprehension [i for i in ...].
    return list(range(min, max + 1, step))
|
e0d0871d52b6ae671bfdcefb15c7bf55bc71fcfa
| 57,012
|
import math
def rotate_image_point(x, y, xmid, ymid, angle):
    """Rotate an image point about (xmid, ymid) by *angle* radians.

    Image coordinates grow downwards in y, so the rotation sign is
    flipped relative to the usual mathematical convention:
    counter-clockwise is positive.

    Arguments:
        x, y: coordinates of the point to rotate
        xmid, ymid: center point to rotate about
        angle: rotation angle in radians
    Returns:
        tuple (xr, yr) of the rotated point
    """
    # Work relative to the rotation center.
    dx = x - xmid
    dy = y - ymid
    c = math.cos(angle)
    s = math.sin(angle)
    # Rotate, then translate back to the original frame.
    xr = dx * c + dy * s + xmid
    yr = -dx * s + dy * c + ymid
    return (xr, yr)
|
242cd09ac90ce0a7c4faf8c595232ee1a7f74441
| 57,016
|
def train_model(model, feature_matrix, train_values):
    """
    Fit the classifier wrapped inside *model* on the given data.

    :param model: dict-like holding the classifier under key "classifier"
    :param feature_matrix: feature matrix passed to ``fit``
    :param train_values: target values passed to ``fit``
    :return: the same model, with its classifier now fitted
    """
    classifier = model["classifier"]
    classifier.fit(feature_matrix, train_values)
    return model
|
f77819fba153c9dca0b397db7ad3c4c0d840083a
| 57,017
|
def make_bigrams(bigram_mod, texts):
    """Apply a trained bigram model to every document in *texts*.

    Args:
        bigram_mod (gensim.models.phrases.Phraser): Gensim bigram model
        texts (list): 2-dimensional list of words (one token list per doc)
    Returns:
        list: one transformed token list per input document
    """
    transformed = []
    for document in texts:
        transformed.append(bigram_mod[document])
    return transformed
|
8a1aafb0c51852ee98432012743aa21494fb86c0
| 57,018
|
def deg_to_hms(deg):
    """Convert decimal degrees to (hr, min, sec).

    15 degrees == 1 hour, so 1 degree == 4 minutes == 240 seconds.
    Assumes deg >= 0 (the int() truncation is not symmetric for negatives).
    """
    h = int(deg) // 15
    deg -= h * 15
    m = int(deg * 4)
    # Bug fixes vs. the original `s = (deg - m//4)/15.`:
    #  * m / 4 (true division) is the number of degrees consumed by the
    #    minutes; m // 4 truncated it to whole degrees.
    #  * the leftover degrees convert to seconds by * 240, not / 15.
    s = (deg - m / 4) * 240
    return h, m, s
|
6e400d79b7c74fb0dd27e24cb10e67fd79be11cb
| 57,020
|
def logical_not(s):
    """
    Return the logical negation of the given binary string.

    :param s: the string to convert; must contain only '0' and '1'
    :type s: str
    :return: str with every bit flipped
    :raises Exception: if *s* contains any other character
    """
    flip = {'0': '1', '1': '0'}
    try:
        # Table lookup + join replaces the quadratic `ns += c` loop.
        return ''.join(flip[c] for c in s)
    except KeyError:
        # Keep the original exception type and message for callers.
        raise Exception("Invalid binary string")
|
196ec5da894f46988116a6fda3c9df195a50eda6
| 57,024
|
def substring_search(word, collection):
    """Return the sorted items of *collection* that start with *word*.

    An empty *word* matches everything, so all items are returned.

    :param word: prefix to search for
    :param collection: iterable of candidate words
    :return: sorted list of matching words
    """
    matches = [candidate for candidate in collection if candidate.startswith(word)]
    matches.sort()
    return matches
|
dd200f02a7fe9ed09f01ad267c498d6bf32f6024
| 57,031
|
import math
def std_periodic_var(p, mid=0.0, pi=math.pi):
    """
    Shift a periodic variable into its standard range [mid - pi, mid + pi).

    >>> std_periodic_var(math.pi)
    -3.1415...
    >>> std_periodic_var(2*math.pi + 0.01)
    0.0...

    :param p: Value
    :param mid: The middle value
    :param pi: Half-range
    :return: The transformed value
    """
    period = 2 * pi
    low, high = mid - pi, mid + pi
    while p < low:
        p += period
    while p >= high:
        p -= period
    return p
|
4b7840e6dcc4c5d947c64e08c2a4555881c69013
| 57,035
|
import bz2
import gzip
def open_output(output, opts):
    """Open *output* for writing using the compression selected in *opts*.

    opts.bzip selects bz2, opts.gzip selects gzip, otherwise plain text.
    """
    if opts.bzip:
        opener, mode = bz2.open, 'wt'
    elif opts.gzip:
        opener, mode = gzip.open, 'wt'
    else:
        opener, mode = open, 'w'
    return opener(output, mode)
|
9f830afb3422f0707ef6b49c9b2000a35b4bc0a0
| 57,036
|
def setup_info_args(parser, parent):
    """Register the `info` subcommand on an argparse subparsers object.

    :param parser: subparsers action to add the `info` command to
    :type parser: argparse._SubParsersAction
    :param parent: parser whose shared flags the subcommand inherits
    :type parent: argparse.ArgumentParser
    :return: the newly created `info` parser
    :rtype: argparse.ArgumentParser
    """
    text = 'Display package and citation information'
    return parser.add_parser(
        'info',
        description=text,
        help=text,
        parents=[parent],
        add_help=False,
    )
|
ef5b920fea2b00833b3194e90c38e881ce88b4ae
| 57,042
|
def _splitstrip(string, delim=u','):
    """Split *string* on *delim* (comma by default) and strip whitespace
    from each resulting piece.
    """
    pieces = string.split(delim)
    return [piece.strip() for piece in pieces]
|
26b62cd6ff9f23908efa0e1be839f9171dbe9a78
| 57,045
|
from re import match
def valid_input(instructions):
    """
    Validate that the input consists only of the directions U, D, L, R.

    :param instructions: multiline string (or iterable of strings) of moves
    :return: True iff every character is one of U, D, L, R and the input
        is non-empty
    """
    joined = ''.join(instructions)
    m = match(r'[UDLR]+', joined)
    # match() anchors at position 0, so the whole input is valid exactly
    # when the match consumed every character. This replaces re-summing
    # the lengths of all the pieces.
    return m is not None and m.end() == len(joined)
|
5e94b803c2a4659c217d5f42511de79140d52c6b
| 57,056
|
def get_member_by_name(slack_client, user_name):
    """
    Return the member dict whose 'name' equals *user_name*, or None
    when no such member exists (or the API returned nothing).
    """
    response = slack_client.api_call("users.list")
    if not response or not response['members']:
        return None
    candidates = (m for m in response['members'] if m['name'] == user_name)
    return next(candidates, None)
|
1e96454663e7b1f453e1433efff46d61f633045c
| 57,070
|
def _remove_tags(tags, removals):
    """Remove every occurrence of each tag in *removals* from *tags*.

    Mutates *tags* in place (so existing references observe the change)
    and returns it, like the original. Assumes tags are hashable
    (they are strings in practice).
    """
    removal_set = set(removals)
    # Single O(n) rebuild instead of repeated list.remove() calls, which
    # were O(n) each and O(n^2) overall.
    tags[:] = [tag for tag in tags if tag not in removal_set]
    return tags
|
0b194437d16935a11fe867309ffc3944b56506cf
| 57,071
|
def extract_data_values(data, value_key):
    """Extract one column of values from a dict of records.

    Parameters
    ----------
    data : dict
        The raw data: maps an id to a record (mapping) that contains
        *value_key*.
    value_key : string
        The field to extract from each record.

    Returns
    -------
    values : dict
        Maps each id in *data* to ``data[id][value_key]``.
    """
    return {record_id: data[record_id][value_key] for record_id in data}
|
42d713bb7b3970c965e2c5f503032cd6fd66baf6
| 57,077
|
def get_deletions_x(parsed_mutations, max_mutations_per_pos_dict):
    """Get x coordinates of deletion markers to overlay in heatmap.
    These are the linear x coordinates used in the Plotly graph object.
    i.e., the indices of data["heatmap_x_nt_pos"]
    :param parsed_mutations: A dictionary containing multiple merged
        ``get_parsed_gvf_dir`` return "mutations" values.
    :type parsed_mutations: dict
    :param max_mutations_per_pos_dict: See
        ``get_max_mutations_per_pos`` return value.
    :type max_mutations_per_pos_dict: dict
    :return: List of x coordinate values to display insertion markers
    :rtype: list[int]
    """
    ret = []
    for strain in parsed_mutations:
        # How far markers need to be pushed right due to earlier
        # heterozygous mutations.
        x_offset = 0
        # Iterate genome positions in heatmap order; each position spans
        # `num_of_mutations` columns in the rendered heatmap.
        for i, pos in enumerate(max_mutations_per_pos_dict):
            num_of_mutations = max_mutations_per_pos_dict[pos]
            if pos in parsed_mutations[strain]:
                # j walks the mutations stacked at this position.
                for j, mutation in enumerate(parsed_mutations[strain][pos]):
                    deletion = mutation["mutation_type"] == "deletion"
                    hidden = mutation["hidden_cell"]
                    if deletion and not hidden:
                        # Linear x index: position index + stack index
                        # + columns consumed by earlier multi-mutation
                        # positions.
                        ret.append(i+j+x_offset)
            # Every position contributes (num_of_mutations - 1) extra
            # columns, whether or not this strain has a mutation there.
            x_offset += num_of_mutations - 1
    return ret
|
1e1b1042580e8260bfe1ae798fbcf38e85b76f0a
| 57,083
|
from typing import List
from typing import Dict
def get_phrases(buckets: List, data: Dict) -> Dict[float, List[str]]:
    """
    Group spoken words into time buckets.

    :param buckets: Python list of bucket timestamps (from GT), ascending.
    :param data: dict with parallel "timestamps" and "words" lists.
    :return: dict mapping each bucket timestamp to the words spoken in it.
    """
    phrases = {bucket: [] for bucket in buckets}
    bucket_idx = 0
    for ts, word in zip(data["timestamps"], data["words"]):
        # Advance (at most one bucket per word, as in the original) when
        # the timestamp has crossed into the next bucket.
        if bucket_idx + 1 < len(buckets) and ts >= buckets[bucket_idx + 1]:
            bucket_idx += 1
        phrases[buckets[bucket_idx]].append(word)
    return phrases
|
48aa35198407fd2b562ef81a7fed5e8a2db8219e
| 57,086
|
import json
import requests
def parse_github(username):
    """
    Return the node_id, id and name of a Github user, fetched from the
    public Github REST API.

    Example:
        parse_github("av1m") # ["MDQ6VXNlcjM2NDU2NzA5", 36456709, "Avi Mimoun"]

    :param username: corresponds to the username Github
    :return list: index 0 is node_id, index 1 is id, index 2 is name
    """
    # NOTE(review): no timeout and no HTTP status check — a failed or
    # rate-limited request surfaces here as a KeyError/JSONDecodeError.
    # response.json() with raise_for_status() would be more robust; confirm
    # callers' error expectations before changing.
    data = json.loads(requests.get("https://api.github.com/users/"+username).content)
    return [data["node_id"], data["id"], data["name"]]
|
c33ff1411fd72d388ea6fb86c4f74961e2fa1479
| 57,089
|
def is_single_word_predicate(node):
    """
    Return true iff the node represents a single-word, non-implicit,
    predicate.
    """
    if not node.isPredicate:
        return node.isPredicate
    return (len(node.text) == 1) and (not node.is_implicit())
|
b78f1dcdbf75dbf03ff22d3d14c6831cfc5f322a
| 57,090
|
def has(l, key, value):
    """Check if list has a dict with a matching key-value pair.

    Parameters
    ----------
    l : List[Dict[str, Any]]
        List to check for matches.
    key : str
        Key looked up in each dictionary (a missing key raises KeyError,
        as in the original).
    value : Any
        Value compared against each dictionary's entry for *key*.

    Returns
    -------
    bool
        True iff some dictionary in the list maps *key* to *value*.
    """
    return any(entry[key] == value for entry in l)
|
a11685f4fa5b2a9932260db16e96cbc0ca240326
| 57,093
|
def update_server_size(tests_dataframe):
    """
    Add a ``server_size`` column to the dataframe, initialised to 0 for
    every row.

    Parameters:
        tests_dataframe: DataFrame
            pandas dataframe object (mutated in place)
    Returns:
        tests_dataframe: DataFrame
            the same dataframe, now with a server_size column
    """
    tests_dataframe["server_size"] = 0
    return tests_dataframe
|
115c1830362d58bfc371335078a2a2cbc17d2446
| 57,094
|
import re
def replace_term(text, old_term, new_term):
    """Replace occurrences of *old_term* in *text* with *new_term*.

    Note that *old_term* is interpreted as a regular expression pattern,
    not a literal string.
    """
    return re.sub(old_term, new_term, text)
|
8c3e692ba90cf14c580ecd64a3a9030d85fdd2b3
| 57,099
|
import torch
from typing import List
def named_tensor_split(tensor: torch.Tensor, split_size: int, dim: str) \
        -> List[torch.Tensor]:
    """
    Split a named tensor along the dimension called *dim*.

    Works like ``torch.split`` but resolves the dimension by name and
    returns a list instead of a tuple.

    Args:
        tensor: the (named) tensor to split
        split_size: size of each split chunk
        dim: name of the dimension to split along
    Returns:
        list of tensor splits
    """
    axis = tensor.names.index(dim)
    chunks = tensor.split(split_size, dim=axis)
    return list(chunks)
|
0734623c159ad73c99bb4ba8cd5df675d5e2e063
| 57,107
|
def tzy_flatten(ll):
    """
    Flatten a list one level deep.

    Expected outcome:
    > flatten([1, 2, [3, 4], [5]])
    [1, 2, 3, 4, 5]

    Only goes 1 depth, so O(n) time and O(n) space.
    """
    flat = []
    for item in ll:
        if isinstance(item, list):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
|
279fb4c3dec69d386e66ab3f56662dfa99e15079
| 57,109
|
def num_elements(num):
    """Return either "1 element" or "N elements" depending on the argument."""
    if num == 1:
        return '1 element'
    return '%d elements' % num
|
56c48771cba2c5b51aaf0a9ff58cd9aa7a8a81e6
| 57,112
|
def elements_adjacent(elements, index1, index2):
    """Check if two elements are adjacent, i.e. whether the 3-entry
    columns at *index1* and *index2* share any value."""
    first = (elements[0, index1], elements[1, index1], elements[2, index1])
    second = (elements[0, index2], elements[1, index2], elements[2, index2])
    return any(a == b for a in first for b in second)
|
16a2d2d0cbd93199ffa21867dd4f4b6913f9b62a
| 57,113
|
import torch
def mixture_consistency(mixture, est_sources, src_weights=None, dim=1):
    """ Applies mixture consistency to a tensor of estimated sources.
    Args
        mixture (torch.Tensor): Mixture waveform or TF representation.
        est_sources (torch.Tensor): Estimated sources waveforms or TF
            representations.
        src_weights (torch.Tensor): Consistency weight for each source.
            Shape needs to be broadcastable to `est_source`.
            We make sure that the weights sum up to 1 along dim `dim`.
            If `src_weights` is None, compute them based on relative power.
        dim (int): Axis which contains the sources in `est_sources`.
    Returns
        torch.Tensor with same shape as `est_sources`, after applying mixture
        consistency.
    Notes
        This method can be used only in 'complete' separation tasks, otherwise
        the residual error will contain unwanted sources. For example, this
        won't work with the task `sep_noisy` from WHAM.
    Examples
        >>> # Works on waveforms
        >>> mix = torch.randn(10, 16000)
        >>> est_sources = torch.randn(10, 2, 16000)
        >>> new_est_sources = mixture_consistency(mix, est_sources, dim=1)
        >>> # Also works on spectrograms
        >>> mix = torch.randn(10, 514, 400)
        >>> est_sources = torch.randn(10, 2, 514, 400)
        >>> new_est_sources = mixture_consistency(mix, est_sources, dim=1)
    References
        Scott Wisdom, John R Hershey, Kevin Wilson, Jeremy Thorpe, Michael
        Chinen, Brian Patton, and Rif A Saurous. "Differentiable consistency
        constraints for improved deep speech enhancement", ICASSP 2019.
    """
    # If the source weights are not specified, the weights are the relative
    # power of each source to the sum. w_i = P_i / (P_all), P for power.
    if src_weights is None:
        all_dims = list(range(est_sources.ndim))
        all_dims.pop(dim)  # Remove source axis
        all_dims.pop(0)  # Remove batch dim
        # NOTE(review): the two pops above assume dim >= 1 — confirm callers.
        # keepdim=True leaves singleton dims so the weights broadcast back
        # against est_sources below.
        src_weights = torch.mean(est_sources**2, dim=all_dims, keepdim=True)
    # Make sure that the weights sum up to 1; the 1e-8 epsilon guards
    # against a division by zero when all estimates are silent.
    norm_weights = torch.sum(src_weights, dim=dim, keepdim=True) + 1e-8
    src_weights = src_weights / norm_weights
    # Compute residual mix - sum(est_sources)
    if mixture.ndim == est_sources.ndim - 1:
        # mixture (batch, *), est_sources (batch, n_src, *)
        residual = (mixture - est_sources.sum(dim=dim)).unsqueeze(dim)
    elif mixture.ndim == est_sources.ndim:
        # mixture (batch, 1, *), est_sources (batch, n_src, *)
        residual = mixture - est_sources.sum(dim=dim, keepdim=True)
    else:
        n, m = est_sources.ndim, mixture.ndim
        raise RuntimeError(f'The size of the mixture tensor should match the '
                           f'size of the est_sources tensor. Expected mixture'
                           f'tensor to have {n} or {n-1} dimension, found {m}.')
    # Distribute the residual across the sources according to the weights,
    # so the corrected estimates sum back to the mixture exactly.
    new_sources = est_sources + src_weights * residual
    return new_sources
|
2ef8aa26b9c61edf989bbc3a211c6dd09f1e9bf2
| 57,115
|
def format_time(time, unit, delimiter=False):
    """
    Format a time quantity as "<time> <unit>[s][,]" for Discord output.

    Pluralises the unit when time > 1, appends a trailing comma when
    *delimiter* is truthy, and returns '' for non-positive times.
    """
    if time <= 0:
        return ''
    suffix = 's' if time > 1 else ''
    formatted = f"{time} {unit}{suffix}"
    return formatted + ',' if delimiter else formatted
|
46a21aae69dd77d082b1440a4cbe7416ce974d7c
| 57,116
|
def how_many_days(month_number):
    """Return the number of days in month *month_number* (1-12).

    WARNING: This function doesn't account for leap years!
    """
    month_lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    return month_lengths[month_number - 1]
|
aceab5f5fb72caff832a8a188189a5d3ba194413
| 57,117
|
def batch_to_device(batch, target_device):
    """
    Move a batch's features and labels to a device.

    :param batch: dict with keys "features" (a list of per-sentence-pair
        dicts of tensors — assumed from the access pattern; TODO confirm
        against callers) and "labels" (a tensor)
    :param target_device: device passed to ``Tensor.to``
    :return: tuple ``(features, labels)`` with every tensor on the device.
        Note this is NOT the original batch dict, although the nested
        feature dicts are mutated in place.
    """
    features = batch['features']
    for paired_sentence_idx in range(len(features)):
        for feature_name in features[paired_sentence_idx]:
            # .to() returns a (possibly new) tensor on the device, so the
            # result must be written back into the nested dict.
            features[paired_sentence_idx][feature_name] = features[paired_sentence_idx][feature_name].to(target_device)
    labels = batch['labels'].to(target_device)
    return features, labels
|
1b069ad110c06cf2a0bf8c0a9385937955228499
| 57,120
|
def get_email_template(
    plugin_configuration: list, template_field_name: str, default: str
) -> str:
    """Get the email template value for *template_field_name* from the
    plugin configuration, falling back to *default* when the field is
    absent or its value is falsy."""
    matching_values = (
        field["value"]
        for field in plugin_configuration
        if field["name"] == template_field_name
    )
    value = next(matching_values, None)
    return value or default
|
0fca7997b9747a50c881da556610dae3ef2cf342
| 57,123
|
def circular_between(start, bet, end):
    """Return True if *bet* lies strictly between *start* and *end* on a
    circle.

    Arguments:
        start {Integer}
        bet {Integer}
        end {Integer}
    Returns:
        Boolean -- True if it is in between, else False
    """
    if end > start:
        return start < bet < end
    if end < start:
        # Interval wraps around zero: strictly after start OR strictly
        # before end.
        return bet > start or bet < end
    # start == end: empty interval. The original fell off the end and
    # implicitly returned None here; return an explicit (still falsy) False.
    return False
|
bd9d06ce42795e8d32f21b0c07fb9aef1f67b88b
| 57,128
|
import typing
def flatten_catalog(catalog: typing.List[dict]) -> dict:
    """Turn the raw service catalog into a simple
    ``{service_type: {region: url}}`` dict."""
    flat = {}
    for service in catalog:
        region_map = {}
        for endpoint in service['endpoints']:
            region_map[endpoint['region']] = endpoint['url']
        flat[service['type']] = region_map
    return flat
|
0450e7a354f33ae4e133c4a3e250a7861cd2264f
| 57,130
|
def lyambda_fact_bot(m_distrib, R, P_mol, F_mol, phi):
    """
    Calculate the factor of masstransfer for the bottom section.

    Parameters
    ----------
    m_distrib : float
        The distribution coefficient, [dimensionless]
    R : float
        The reflux number, [dimensionless]
    P_mol : float
        The flow rate of distillate, [kmol/s]
    F_mol : float
        The flow rate of feed, [kmol/s]
    phi : float
        The fraction of vapor at the feed point

    Returns
    -------
    lyambda_fact_bot : float
        The factor of masstransfer, [dimensionless]

    References
    ----------
    Dytnersky, p. 239, eq. 6.35
    """
    vapor_flow = P_mol * (R + 1) - F_mol * phi
    liquid_flow = P_mol * R + F_mol * (1 - phi)
    return m_distrib * vapor_flow / liquid_flow
|
36af45afe68f14ea648df15efc1ef4e555d94e5c
| 57,131
|
def run(action, *args, **kwargs):
    """
    :doc: run
    :name: renpy.run
    :args: (action)

    Run an action or list of actions. A single action is called with the
    given arguments, a list of actions is run in order via recursion, and
    None is ignored.

    Returns the result of the last action in a list to return a non-None
    value (or the single action's result).
    """
    if action is None:
        return None
    if not isinstance(action, (list, tuple)):
        return action(*args, **kwargs)
    result = None
    for item in action:
        value = run(item, *args, **kwargs)
        if value is not None:
            result = value
    return result
|
924227a1d182c7abc99419830605c0f278ba0dd8
| 57,133
|
def get_csv_header(
    reviews_per_paper, num_similar, area_chairs=False, is_global=False
):
    """
    Build the csv output header fields for an assignment file.

    Args:
        reviews_per_paper (int): Number of reviews received by each paper
        num_similar (int): Number of additional similar reviewers/ACs to
            list for each submission
        area_chairs (bool): Whether this assignment is for ACs
        is_global (bool): Whether the header is for the global csv file
            (otherwise the track-wise file)
    Returns:
        list: Header fields for the output csv file
    """
    role = 'AC' if area_chairs else 'reviewer'
    # ID and the assigned reviewer columns appear in both variants.
    header = ['ID']
    for x in range(1, reviews_per_paper + 1):
        header.extend([f'Assigned {role} {x}', f'Assigned {role} {x} score'])
    if is_global:
        # Global file: track info plus a same-track flag.
        header.extend(['Track', 'Assigned within same track'])
    else:
        # Track-wise file: similar (non-assigned) reviewers and COI columns.
        for x in range(1, num_similar + 1):
            header.extend([f'Similar {role} {x}', f'Similar {role} {x} score'])
        header.extend(['SACs with COI', 'ACs with COI', 'Reviewers with COI'])
    return header
|
7dc2ffafcec0fc05b56bb8ab11d210691df75f06
| 57,135
|
def tobytes(s):
    """tobytes(s) -> bytes

    Ensures the given argument is of type bytes. str values are encoded
    as UTF-8; bytes pass through unchanged; anything else raises.
    (The docstring previously referred to a nonexistent ``force_bytes``.)

    Example:
    >>> tobytes(b'abc')
    b'abc'
    >>> tobytes('abc')
    b'abc'
    >>> tobytes(1)
    Traceback (most recent call last):
    ...
    TypeError: Expecting a value of type bytes or str, got 1
    """
    if isinstance(s, bytes):
        return s
    elif isinstance(s, str):
        return s.encode('utf8')
    else:
        raise TypeError('Expecting a value of type bytes or str, got %r' % s)
|
911061e1be21e2237ba65dc271d8e576e4823b23
| 57,137
|
import socket
def is_server_alive(host, port):
    """
    Attempt to connect to a given host and port.

    :param host: the target host name or ip address.
    :param port: the target port.
    :return: True if the connection to the provided host and port is
        successful; False if the connection is refused. Other socket
        errors (timeouts, unreachable hosts) propagate, as before.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        # Use the named constant instead of the magic number 2.
        s.shutdown(socket.SHUT_RDWR)
        return True
    except ConnectionRefusedError:
        return False
    finally:
        # The original leaked the socket on every path; always close it.
        s.close()
|
9bf276e42c69e471b532049939e1192b7600cd32
| 57,139
|
import math
def dist(p: list, q: list) -> float:
    """
    Return the Euclidean distance between points p and q, emulating
    Python 3.8's math.dist(p, q).

    Citation: https://docs.python.org/3/library/math.html
    :param p: A multi-dimensional (dimension >= 1) point as a list
    :param q: A multi-dimensional (dimension >= 1) point as a list
    :return: Euclidean distance between points p and q
    """
    squared_total = 0.0
    for p_coord, q_coord in zip(p, q):
        squared_total += (p_coord - q_coord) ** 2.0
    return math.sqrt(squared_total)
|
7690d02fcbc9e2d18a94324e6e3c95bad56d279e
| 57,140
|
def dataframe_series_summary(series_summaries: dict) -> dict:
    """Aggregate missing-value statistics from per-series summaries.

    Args:
        series_summaries: mapping from series to its summary dict
    Returns:
        dict with:
          - "na_count": total missing values across all series
          - "n_vars_missing": number of series with at least one missing
            value (bug fix: this counter was declared but never updated)
    """
    summary = {"na_count": 0, "n_vars_missing": 0}
    for series_summary in series_summaries.values():
        if series_summary.get("na_count", 0) > 0:
            summary["na_count"] += series_summary["na_count"]
            summary["n_vars_missing"] += 1
    return summary
|
db1c1e2d27b5c95782759e9b67f372ef86a6aebe
| 57,141
|
def cpu_usage_for_process(results, process):
    """
    Calculate the CPU percentage for a process running in a particular
    scenario: total cputime divided by total wallclock over the matching
    result records.

    :param results: Results to extract values from.
    :param process: Process name to calculate CPU usage for.
    :return: float ratio, or None when no wallclock time was recorded.
    """
    cpu_total = 0
    wall_total = 0
    for record in results:
        if record['metric']['type'] == 'cputime' and record['process'] == process:
            cpu_total += record['value']
            wall_total += record['wallclock']
    if wall_total <= 0:
        return None
    return float(cpu_total) / wall_total
|
8fc962cf9f7d1fcab1b8a223b4c15dd2eb0e51f4
| 57,146
|
import ast
def get_nth_argument_name(arguments: ast.arguments, n: int) -> str:
    """Return the name of the nth positional argument, or the empty
    string when n is out of range (the empty-list case is subsumed by the
    range check)."""
    positional = arguments.args
    if 0 <= n < len(positional):
        return positional[n].arg
    return ""
|
913c869a894413d8a715455e967e89f9ab29626b
| 57,148
|
def _build_gcs_destination(config):
    """Builds a GcsDestination from values in a config dict.

    Args:
        config: All the user-specified export parameters. Modified
            in-place: the parameters consumed here are popped out.
    Returns:
        A GcsDestination dict built from the extracted values.
    """
    destination = {'bucket': config.pop('outputBucket')}
    _missing = object()
    # Sentinel rather than `in` + pop, so a present-but-None prefix is
    # still copied over, matching the original behaviour.
    prefix = config.pop('outputPrefix', _missing)
    if prefix is not _missing:
        destination['filenamePrefix'] = prefix
    if config.pop('writePublicTiles', False):
        destination['permissions'] = 'PUBLIC'
    return destination
|
54eeb5fbc747a7bee52fe286f0b4e7fe1a055143
| 57,150
|
def bytes_from(path):
    """
    Return the full contents of a file as bytes.

    :param path: file path string
    """
    with open(path, "rb") as stream:
        data = stream.read()
    return data
|
04addff5f07ee3714d51d125bdc0f5917238a812
| 57,156
|
def get_drug_dcid(row):
    """Return the dcid of a drug.

    Prefers the ChEMBL ID; when it is missing/empty, the dcid is built
    from the PharmGKB accession id instead.
    """
    chembl_id = row['ChEMBL ID']
    identifier = chembl_id if chembl_id else row['PharmGKB Accession Id']
    return 'bio/' + identifier
|
a8b5b5d639e4aa07a6778f589b23415106658b45
| 57,157
|
import hashlib
def filehash(filepath):
    """Compute the MD5 hex digest of a file.

    Args:
        filepath (str): Path to the file.
    Returns:
        str: hexadecimal MD5 digest of the file contents.
    """
    hasher = hashlib.md5()
    with open(filepath, 'rb') as f:
        # Stream in fixed-size chunks so arbitrarily large files never
        # need to fit in memory (the original read the whole file at once).
        for chunk in iter(lambda: f.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
|
28419c227ccea3cd23960c59abe1dfc981bf9ed2
| 57,161
|
import asyncio
def synchronize_generator(async_generator, *args, **kwargs):
    """
    Returns a synchronous generator from an asynchronous generator.

    A private event loop drives the async generator one step at a time;
    values sent into this generator (via .send) are forwarded to the
    async generator through asend(). The loop is closed when the async
    generator is exhausted or when a truthy stop signal is sent.
    """
    ag = async_generator(*args, **kwargs)
    # Coroutine wrapper: advance the async generator one step, forwarding
    # the caller-supplied stop signal into asend().
    async def consume_generator(stop_signal):
        r = await ag.asend(stop_signal)
        return r
    loop = asyncio.new_event_loop()
    try:
        stop_signal = None
        # Pump values out until the caller sends a truthy stop signal
        # (or StopAsyncIteration ends the async generator).
        while not stop_signal:
            val = loop.run_until_complete(consume_generator(stop_signal))
            stop_signal = yield val
        if stop_signal:
            # Deliver the stop signal to the async generator and yield
            # its final value after the loop has been shut down.
            val = loop.run_until_complete(consume_generator(stop_signal))
        loop.close()
        loop = None
        yield val
    except StopAsyncIteration:
        # Async generator exhausted: clean up the private loop.
        loop.close()
        loop = None
    # Invariant: every exit path above must have closed the loop.
    assert loop is None
|
c301b333491d35cf654ac2eccbc6d4226485e805
| 57,163
|
def coverage(index_seq, segment_list, qseqseq):
    """
    Coverage of an index_seq: the ratio of the span of the concatenation
    (from the first segment's start to the last segment's end) to the
    length of the original string.
    """
    first_start = segment_list[index_seq[0]][1]
    last_end = segment_list[index_seq[-1]][2]
    return (last_end - first_start) / len(qseqseq)
|
a1d0666bfed13adf308fa7362c7c4653d325a422
| 57,165
|
def mk_scan_mapper(condition_map, dflt=None):
    """Make a function implementing if/elif/.../else logic from a
    {bool_func: result} map: the first predicate (in insertion order)
    that accepts x wins; *dflt* is returned when none match."""
    def scan_mapping(x):
        hits = (result for predicate, result in condition_map.items() if predicate(x))
        return next(hits, dflt)
    return scan_mapping
|
8787cf7783b124029ce28f908c5a49c25ef5de33
| 57,173
|
def do_while_loop(printer, ast):
    """Render a do/while statement as "do {body} while (cond);"."""
    # Keep the original evaluation order (cond first) in case the
    # printer is stateful.
    rendered_cond = printer.ast_to_string(ast["cond"])
    rendered_body = printer.ast_to_string(ast["body"])
    return f'do {rendered_body} while ({rendered_cond});'
|
ef98227dc3f405000fdbc49b29eee8ad571818b6
| 57,176
|
def fibonacci(n):
    """
    Generate the list of Fibonacci numbers (starting 1, 1) up to and
    including n.
    """
    result = [1, 1]
    if n <= 2:
        # Matches the original's truncation behaviour for tiny n.
        return result[0:n]
    while True:
        candidate = result[-2] + result[-1]
        if candidate > n:
            return result
        result.append(candidate)
|
e497f204720106d56d6dffee34d8ad39e7409d08
| 57,178
|
def get_video_file_paths(input_dir):
    """From a directory, return the entries whose extension is a commonly
    used video file extension.

    Args:
        input_dir (Path): The folder in which we'll search for videos
            (only direct children are considered, as before).
    Returns:
        List: Video file paths found in the directory.
    """
    VIDEO_FILE_TYPES = (
        ".avi",
        ".mp4",
        ".mkv",
        ".webm",
        ".mpeg",
        ".ogg",
        ".m4v",
        ".wmv",
        ".mov",
        ".flv",
    )
    # Compare case-insensitively so files like "clip.MP4" are not missed.
    return [
        entry
        for entry in input_dir.iterdir()
        if entry.suffix.lower() in VIDEO_FILE_TYPES
    ]
|
b9264fe02e2d06b4b07b27636bfe0a253e1fe8f1
| 57,179
|
import io
def parse(stdin: io.TextIOWrapper) -> list:
    """
    Parse the input into a list of tuples: (direction string, int distance),
    one per non-empty line.
    """
    moves = []
    for line in stdin.read().strip().splitlines():
        direction, distance = line[0], int(line[1:])
        moves.append((direction, distance))
    return moves
|
7513285adb9fc5dca2d69b1b4d920589ecc15df4
| 57,181
|
def cantor(a, b):
    """Cantor pairing function, used to give a unique int name to each
    observation (pair of non-negative integers)."""
    a = int(a)
    b = int(b)
    # (a+b)*(a+b+1) is always even, so floor division is exact. The
    # original used true division, which returned a float — not an "int
    # name" and lossy for large inputs.
    return (a + b) * (a + b + 1) // 2 + b
|
04eeb7caef77516b6e1efca5731427a2e1a3c63e
| 57,187
|
def inputs_vocabulary(vocabulary):
    """Get the inputs vocabulary.

    Args:
        vocabulary: Vocabulary or (inputs_vocabulary, targets_vocabulary)
            tuple.
    Returns:
        a Vocabulary (the first element when a tuple is given).
    """
    if isinstance(vocabulary, tuple):
        return vocabulary[0]
    return vocabulary
|
2c816dd32d5d7a0afba111e052c55153ce4a189e
| 57,190
|
def convert_distance_to_weight(dist_matrix):
    """
    Translate raw distance-to-edge data into weight using a parabolic
    equation, giving higher priority to pixels located closer to an edge.

    :param dist_matrix: matrix of distances to edges (distance transform)
    :return: new weight matrix (the input array is not mutated)
    """
    _, width = dist_matrix.shape[:2]
    max_dist = width // 30
    # Parabola: weight 1.0 at distance 0, falling to 0.0 at max_dist.
    coeff = -1.0 / (max_dist ** 2)
    weights = coeff * (dist_matrix ** 2) + 1.0
    # Clamp everything beyond max_dist to zero.
    weights[weights < 0.0] = 0.0
    return weights
|
84cc4e1ceca167c19d57b580880043b97517ccd8
| 57,192
|
import inspect
def GetFileAndLine(component):
    """Returns the filename and line number of component.

    Args:
        component: A component to find the source information for,
            usually a class or routine.
    Returns:
        filename: Name of the file where component is defined, or None
            for builtins / objects with no source file.
        lineno: Line number where component is defined, or None when the
            source cannot be located.
    """
    if inspect.isbuiltin(component):
        return None, None
    try:
        filename = inspect.getsourcefile(component)
    except TypeError:
        return None, None
    try:
        _, line_index = inspect.findsource(component)
    except (IOError, IndexError):
        lineno = None
    else:
        # findsource() gives a 0-based index.
        lineno = line_index + 1
    return filename, lineno
|
892710e43ffd60b196dd8121dd215aa3c1806770
| 57,193
|
import re
def unsupported_arguments(doc, args):
    """Mark unsupported arguments in a docstring with a disclaimer.

    For each name in *args*, if exactly one line of *doc* documents it
    (matching '<name> :' with optional leading whitespace), append
    " (Not supported in Dask)" to that line.
    """
    lines = doc.split("\n")
    for arg in args:
        pattern = re.compile(r"^\s*" + arg + " ?:")
        hits = [i for i, line in enumerate(lines) if pattern.match(line)]
        # Only annotate unambiguous (single) matches.
        if len(hits) == 1:
            idx = hits[0]
            lines[idx] = lines[idx] + " (Not supported in Dask)"
    return "\n".join(lines)
|
ea357b7fcc8101aba1fc811a07cd89287958d9a7
| 57,195
|
def geometric_series(common_ratio, number_of_images, first_term=1):
    """
    Provide the geometric series for the integration. The last value of
    the series is less than number_of_images.

    ex: number_of_images = 100, first_term = 1
        common_ratio = 2 -> 1, 2, 4, 8, 16, 32, 64
        common_ratio = 3 -> 1, 3, 9, 27, 81

    Parameters
    ----------
    common_ratio : float
        common ratio of the series
    number_of_images : int
        number of images
    first_term : float, optional
        first term in the series

    Return
    ------
    list
        time series: a, a*r, a*r^2, ... (a = first term, r = common ratio)
    """
    # Local renamed so it no longer shadows the function's own name.
    series = [first_term]
    while series[-1] * common_ratio < number_of_images:
        series.append(series[-1] * common_ratio)
    return series
|
f0951f3a54aba71df109a15d07637d2cea01117a
| 57,198
|
def wrap(x, vmin, vmax):
    """Wrap a value x into the half-open interval [vmin, vmax)."""
    span = vmax - vmin
    return (x - vmin) % span + vmin
|
fb0556ccf8d9df3cf96f663aef54ad1a43384d77
| 57,199
|
def _match_stack(selector, stack):
    """Match stacks using selector as the comparison var.
    Strings prefixed with ^ will be treated as negative
    Args:
        selector (str|list[str]): String tokens or list used for matching
        stack (:obj:`stackformation.BaseStack`): stack used for comparison. Will match on get_stack_name() and get_remote_stack_name()
    Returns:
        bool: True if some positive token matches and no negative token does
    """  # noqa
    if not isinstance(selector, list):
        selector = selector.split(' ')
    # Ignore empty tokens produced by consecutive spaces; the original
    # crashed with IndexError on `s[0]` for them.
    tokens = [token.lower() for token in selector if token]
    stack_name = stack.get_stack_name().lower()
    remote_name = stack.get_remote_stack_name().lower()
    positives = []
    negatives = []
    for token in tokens:
        if token.startswith("^"):
            negatives.append(token[1:])
        else:
            positives.append(token)
    result = False
    for token in positives:
        if token in stack_name or token in remote_name:
            result = True
    # Negative tokens veto any positive match.
    for token in negatives:
        if token in stack_name or token in remote_name:
            result = False
    return result
|
b909fe8fe41820c00b1c48b46fe49910031eac11
| 57,200
|
def day2dekad(day):
    """Returns the dekad of a day.

    Parameters
    ----------
    day : int
        Day of the date.

    Returns
    -------
    dekad : int
        Number of the dekad in a month (1: days 1-10, 2: 11-20, 3: rest).
    """
    if day < 11:
        return 1
    if day < 21:
        return 2
    return 3
|
2a6d0918da008ed81f4f97bf1146a4e0e13c0a56
| 57,202
|
def calc_max_quant_value(bits):
    """Calculate the maximum quantized value representable with the given
    number of bits, i.e. 2**bits - 1."""
    max_value = 2 ** bits - 1
    return max_value
|
0cd845c06aef8742cf62132b6569ed725fae4147
| 57,206
|
def enumerate_ids(locationlist):
    """For a given list of locations (each a list), prepend a generated
    "LOCID<n>" identifier to each, in place, and return the list."""
    for number, location in enumerate(locationlist, start=1):
        location.insert(0, "LOCID%s" % (number))
    return locationlist
|
14ec2972881e48904736be658c56456b8bb14b85
| 57,209
|
def substitute_rpath(orig_rpath, topdir, new_root_path):
    """
    Return a copy of the RPATH list with topdir replaced by new_root_path.
    """
    return [entry.replace(topdir, new_root_path) for entry in orig_rpath]
|
eb69a0807558b5b7d122c4d4149069b04c05e8ec
| 57,211
|
def clean_dict(target, keys):
    """Build a new dictionary holding only the requested keys.

    Args:
        target (dict): Dictionary to extract keys from.
        keys (list): Keys to extract; keys absent from target are skipped.

    Returns:
        dict: Dictionary with the requested keys that were present.
    """
    extracted = {}
    for key in keys:
        if key in target:
            extracted[key] = target[key]
    return extracted
|
2efa7797b475af7fd7aa23cba6a133844395f05a
| 57,214
|
def is_set_policy(policy):
    """Return True when the policy action is a set action without metadata."""
    is_set_action = 'setIamPolicy' in policy['action']
    return is_set_action and 'metadata' not in policy
|
33bf358ec78a1ddf023b2fb890f4b067b6263273
| 57,216
|
def least_larger(arr: list, idx: int) -> int:
    """Return the index of the smallest element strictly greater than arr[idx].

    Args:
        arr: List of comparable values.
        idx: Index of the reference element.

    Returns:
        Index of the least element larger than arr[idx], or -1 if none exists
        (also -1 for an empty list).
    """
    # Bug fix: original guard was `len(arr) < 0`, which is never true, so an
    # empty list raised IndexError instead of returning -1.
    if not arr:
        return -1
    larger = [value for value in arr if value > arr[idx]]
    if larger:
        return arr.index(min(larger))
    return -1
|
e181cd9cbe8bf5f49c400125b04d1028ff79880e
| 57,217
|
def get_yaml_version(fd, token):
    """
    A YAML token is found, so see if the YAML version can be parsed.

    Reads the rest of the current line from fd, restores the file position,
    and parses a string looking like '%YAML X.X'.

    Parameters
    ----------
    fd : GenericFile
        File-like object supporting tell/read/seek on bytes.
    token : bytes
        The YAML token already consumed (e.g. b"%YAML").

    Return
    ------
    yaml_version : tuple or None
        (major, minor) version tuple, or None if the line is malformed.
    """
    offset = fd.tell()
    while True:
        c = fd.read(1)
        token += c
        # Bug fix: also stop at EOF (empty read); the original looped forever
        # when the token was not followed by a newline.
        if not c or b"\n" == c:
            break
    # Restore the position so the caller's stream is unaffected.
    fd.seek(offset)
    yaml_version = None
    line = token.decode("utf-8").strip()
    parts = line.split(" ")
    if len(parts) == 2:
        yaml_version = tuple(int(x) for x in parts[1].split("."))
    return yaml_version
|
ac6375e698f40c15f2e7f87a8bcd1d1dd5a8c2a7
| 57,227
|
def safe_dict(obj_or_func, **kwargs):
    """
    Create a dict from any object with all attributes, skipping names that
    start with an underscore. If a callable is given it is invoked with
    **kwargs first. Useful for objects without __dict__, like psutil results.
    """
    result = obj_or_func(**kwargs) if callable(obj_or_func) else obj_or_func
    if hasattr(result, '__dict__'):
        return result.__dict__
    snapshot = {}
    for name in dir(result):
        if name.startswith('_'):
            continue
        value = getattr(result, name)
        # Keep only truthy, non-callable attribute values (same filter as before).
        if value and not callable(value):
            snapshot[name] = value
    return snapshot
|
a0c7118f1a1d4144e635b295f0bb8bde36424eec
| 57,243
|
def view_exists(spark_session, view_name):
    """Return True if the specified view exists in the given Spark session.

    Existence is probed by attempting to read the view; any read failure is
    treated as "does not exist".
    """
    try:
        spark_session.read.table(view_name)
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed while probing.
    except Exception:
        return False
    return True
|
c21998d87c9cc5715d1b3ef93bfea0b9cfecc19d
| 57,248
|
def sum_all_but_dims(dims_to_keep, da):
    """Sum an xr.DataArray over every dimension not listed in dims_to_keep."""
    keep = set(dims_to_keep)
    reduce_dims = [d for d in da.dims if d not in keep]
    return da.sum(dim=reduce_dims)
|
eb9412d29666076935537c9c385adf9088cd5464
| 57,249
|
def pam4_decision(x, l, m, h):
    """Slice an analog voltage into a PAM4 symbol.

    Parameters
    ----------
    x : float
        The analog voltage.
    l : float
        Threshold between symbols 0 and 1.
    m : float
        Threshold between symbols 1 and 2.
    h : float
        Threshold between symbols 2 and 3.

    Returns
    -------
    int
        The PAM4 symbol (0-3) that represents x.
    """
    # First threshold that x falls below determines the symbol.
    for symbol, threshold in enumerate((l, m, h)):
        if x < threshold:
            return symbol
    return 3
|
dbfed2709ab87f138384a6edd9a1b679b6516001
| 57,251
|
def create_slack_attachment(fallback,
                            color=None,
                            pretext=None,
                            author_name=None,
                            author_link=None,
                            author_icon=None,
                            title=None,
                            title_link=None,
                            text=None,
                            fields=None,
                            image_url=None,
                            thumb_url=None,
                            footer=None,
                            footer_icon=None,
                            ts=None
                            ):
    """Create a Slack attachment payload from the non-None arguments.

    See https://api.slack.com/docs/message-attachments for more info.

    Arguments:
        fallback - Required plain-text summary of the attachment
        [color] - Colour of the attachment
        [pretext] - Optional text that appears above the attachment block
        [author_name]
        [author_link]
        [author_icon] - URL to author icon
        [title] - Title of the attachment
        [title_link]
        [text] - Optional text that appears inside the attachment
        [fields] - Array of dicts containing more values
        [image_url] - URL to image attached
        [thumb_url] - URL to image thumbnail
        [footer] - Footer message
        [footer_icon] - URL to footer icon
        [ts] - timestamp
    """
    arguments = locals()  # Must be the first statement so only params are captured
    attachment = {}
    for key, value in arguments.items():
        if value is not None:
            attachment[key] = value
    return attachment
|
cea633beffa534f896506ab239c2c67866157efa
| 57,253
|
def length(segments):
    """Compute the length of each segment.

    Args:
        segments (numpy array or torch tensor): shape [N, 2] holding N
            segments as (start, end) pairs.

    Returns:
        Array/tensor of shape [N] with each segment's length.

    Note:
        It works with time; it would be off if using frames.
    """
    starts = segments[:, 0]
    ends = segments[:, 1]
    return ends - starts
|
a8411448358eb96324771dcf2cd33dfd4345375a
| 57,256
|
def report_status_cell(i):
    """Return the spreadsheet coordinate of the report_status cell (column F)."""
    return f"F{i}"
|
ace90193326ecdf4e11dfe016d393cdaf9d1ff88
| 57,257
|
import math
def angle(p):
    """
    p = (x, y); return the angle measured from the y axis, in [0, 2*pi).
    """
    x, y = p
    # Folding the original (x, -y) flip directly into atan2(-y, x).
    a = math.pi / 2 - math.atan2(-y, x)
    return a + 2 * math.pi if a < 0 else a
|
41114deb7de3d93159b4d219ce5c129b160e7a53
| 57,258
|
def _Pairs(data):
"""dictionary -> list of pairs"""
keys = sorted(data)
return [{'@key': k, '@value': data[k]} for k in keys]
|
d2627a21cfa34efccb68fe977780d33e9024173c
| 57,260
|
def calculate_calibrated_value(image_mean, vector):
    """
    Solve the calibration equation for the optimal low bound of saturation
    and value.

    :param image_mean: the mean of the image being calibrated
    :param vector: dictionary with 'coefficient1', 'coefficient2' and the
        group 'mean' (computed by the Color HSVCalibration)
    :return: the optimal low bound
    """
    c1 = vector['coefficient1']
    c2 = vector['coefficient2']
    group_mean = vector['mean'][0]
    z_mean = group_mean[0] * c1 + group_mean[1] * c2
    return (z_mean - image_mean * c1) / c2
|
71a379eca4a3c2a6889058a0fbdcf2a8b0f779ae
| 57,265
|
def edit_title(ax, text, prepend=False, append=False):
    """
    Add text to the beginning or end of an axes' existing title.

    Args:
        ax (plt.Axes): Axes whose title is edited
        text (str): Text to add
        prepend (bool): Prepend text (mutually exclusive with append)
        append (bool): Append text (mutually exclusive with prepend)

    Returns:
        plt.Axes: the adjusted axes
    """
    assert prepend ^ append  # Exactly one mode must be selected
    current = ax.title.get_text()
    if prepend:
        new_title = f'{text}{current}'
    elif append:
        new_title = f'{current}{text}'
    else:
        raise NotImplementedError
    ax.title.set_text(new_title)
    return ax
|
a238c07a07fc9dc1e31096b00dbeddd68802fb28
| 57,273
|
import torch
def logsumexp(x, dim=None, keepdim=False):
    """Numerically stable logsumexp.
    from https://github.com/pytorch/pytorch/issues/2591

    Args:
        x: A tensor with any shape.
        dim: An integer, or None to reduce over the flattened tensor.
        keepdim: A boolean.

    Returns:
        Equivalent of log(sum(exp(x), dim=dim, keepdim=keepdim)).
    """
    if dim is None:
        # Flatten and reduce over the single remaining dimension.
        x, dim = x.view(-1), 0
    max_val, _ = torch.max(x, dim, keepdim=True)
    is_inf = (max_val == float('inf')) | (max_val == float('-inf'))
    shifted_sum = torch.sum(torch.exp(x - max_val), dim, keepdim=True)
    # Where the max is +/-inf, return it directly to avoid nan from inf - inf.
    result = torch.where(is_inf, max_val, max_val + torch.log(shifted_sum))
    return result if keepdim else result.squeeze(dim)
|
f7e63a2a297352aeeb4bd4d2a3d73eb1ceacd22f
| 57,274
|
import decimal
def format_coordinate(number: decimal.Decimal) -> str:
    """Return the decimal formatted as required by the task.

    2.0000 -> 2
    2.104  -> 2.1
    """
    text = "{:.2f}".format(number)
    # Drop trailing zeros, then a dangling decimal point.
    return text.rstrip("0").rstrip(".")
|
012e651b512e6fa280ca1c34a0a06e44a41c2632
| 57,275
|
def errs_tab(n):
    """Generate the list of error rates for qualities 0 through n inclusive."""
    table = []
    for q in range(n + 1):
        table.append(10 ** (q / -10))
    return table
|
dbece04706ec9d319ee3ce1a81648f34998180c7
| 57,279
|
def _GetGcsPath(source, error_log):
"""Get the path to the folder on Google cloud storage."""
batch_sim_bucket = 'gs://makani/batch_sim/'
gcs_folder = batch_sim_bucket + source
if not error_log:
gcs_folder += '/h5_logs/'
else:
gcs_folder += '/error/'
return gcs_folder
|
0e33a3f04f3edf55d9eed4b77357c08c9957ff50
| 57,282
|
def _progress_bar(count, total):
"""
Get a simple one-line string representing a progress bar.
Arguments:
count (int): Current count. Starts at 0.
total (int): Total count.
Returns:
pbar_string (str): The string progress bar.
"""
bar_len = 60
filled_len = int(round(bar_len * (count + 1) / float(total)))
bar = '=' * filled_len + '-' * (bar_len - filled_len)
return '[{}] {}/{}'.format(bar, count + 1, total)
|
31af780be8486145afc5a2a81d73eae3fb50f841
| 57,292
|
def get_depth(string):
    """Return the number of leading spaces of the given string.

    Example: '  09140 Cellular Processes' -> 2
    """
    return len(string) - len(string.lstrip(' '))
|
c2e8c2b8c480c2c990d3e7a3f06578e26b9440bb
| 57,293
|
import itertools
def list_product(*args):
    """
    Return all combinations of the input iterables as a concrete list
    (rather than a generator).

    Parameters
    ----------
    *args : list, tuple

    Returns
    -------
    list
        List of combination tuples.
    """
    combos = itertools.product(*args)
    return list(combos)
|
f5c45ddbb87821f8776faa1f83eaae673c83b696
| 57,295
|
def get_classes(classes_path):
    """Load class names from a text file, one per line.

    Input:
        classes_path (string) - Path to the classes file.
    Output:
        class_names (list) - Stripped class names.
        class_mappings (dict) - Name -> index mapping, e.g. {"aeroplan": 0, ...}.
    """
    with open(classes_path) as handle:
        class_names = [line.strip() for line in handle]
    class_mappings = {name: index for index, name in enumerate(class_names)}
    return class_names, class_mappings
|
2df39a806573288cf90c53ea9d9e040568ca556a
| 57,296
|
from typing import Dict
from typing import Callable
def transform(data: Dict, *modifiers: Callable[[Dict], None]) -> Dict:
    """
    Apply any number of in-place modifications to the input dictionary.

    :param data: original dictionary (mutated in place)
    :param modifiers: functions applied to the dictionary, in order
    :return: the same dictionary, after all modifiers ran
    """
    for apply_modifier in modifiers:
        apply_modifier(data)
    return data
|
f0916c9123ac90990329f90f9e732018bd307eea
| 57,300
|
def resize_image(image, resize_width, resize_height):
    """Resize an image to the given pixel dimensions.

    Args:
        image (Image): The image to be resized.
        resize_width (int): Target width in pixels.
        resize_height (int): Target height in pixels.

    Returns:
        Image: An image object with the new dimensions.
    """
    return image.resize((resize_width, resize_height))
|
3b8c2b86ac2f6e4ac80357dc1bc66f3b70010b50
| 57,301
|
def strip(cc):
    """Strip all non-numeric characters.

    @type cc: str
    @param cc: Input string.
    @rtype: str
    @return: String containing only the digits of cc.
    """
    return ''.join(filter(str.isdigit, cc))
|
041bd83f1b87854122916780d3ac975c32ae58ef
| 57,304
|
def normalize_column_to_midi_range(column, target_range, target_min_value):
    """
    Normalize values into the value range used by the MIDI format.

    :param column: A non-empty list with the values that should be sonified
    :param target_range: The target range
    :param target_min_value: The target minimum value
    :return: A list with the transformed integer values
    """
    min_value = min(column)
    max_value = max(column)
    value_range = max_value - min_value
    transformed_column = []
    for value in column:
        # Bug fix: when all inputs are equal, value_range is 0 and the
        # original raised ZeroDivisionError; map such inputs to the bottom
        # of the target range instead.
        ratio = 0.0 if value_range == 0 else (value - min_value) / value_range
        transformed_column.append(int(ratio * target_range + target_min_value))
    return transformed_column
|
7aaefa83655baf11e2472f25b302f7623db4aa9d
| 57,305
|
def optimizer_kwargs_gf(parsed_args):
    """
    Build kwargs for the optimizer in optimizer.py from the parsed
    command-line arguments.
    """
    args = parsed_args
    return dict(
        optim=args.optim,
        lr=args.lr_gf,
        weight_decay=args.weight_decay,
        momentum=args.momentum,
        sgd_dampening=args.sgd_dampening,
        sgd_nesterov=args.sgd_nesterov,
        rmsprop_alpha=args.rmsprop_alpha,
        adam_beta1=args.adam_beta1,
        adam_beta2=args.adam_beta2,
    )
|
87489bfb50ff34df7f58b3b6178cff71f169cdb5
| 57,314
|
def clean_doc(doc):
    """Remove spurious end-lines and such from the doc-string specified in
    the comment preceding the C++-python functions, and wrap it in quotes.
    """
    # Regular newlines are mere comment formatting in the cpp file.
    flattened = doc.replace("\n", " ")
    # A "hard enter" (literal \n) may have picked up a trailing space - trim it.
    flattened = flattened.replace("\\n ", "\\n")
    return '"%s"' % flattened
|
43b9df1c888f509ca85c6e82696f287c47c8c8ed
| 57,317
|
def getDescribe(series, percentiles = [.25, .5, .75]):
    """Describe a series without its count.

    Args:
        series (Series): data series
        percentiles: the percentiles to include in the output

    Returns:
        Series: describe stats (mean, std, min, max and percentiles)
    """
    stats = series.describe(percentiles)
    return stats.drop('count')
|
ce95f06081617a591ca94de9d2025983efcaea68
| 57,318
|
def calculate_tax(price, tax_rate):
    """Return the total price including tax.

    Args:
        price: Base price.
        tax_rate: Tax rate as a fraction (e.g. 0.25 for 25%).

    Returns:
        price plus price * tax_rate.
    """
    # Removed leftover debug code (a dead `my_price` local and a stray print)
    # that was unrelated to the computation.
    return price + price * tax_rate
|
f189ae14dc5cd171111b5822f1c60bc81ca9fded
| 57,319
|
def ultimoNome(string: str) -> str:
    """
    Return the last word of a string (the surname of a full name).

    Dots are removed and commas become spaces before splitting on single
    spaces; the last token is returned.

    Parameters
    ----------
    string : str
        Text (full name).

    Returns
    -------
    str
        Text (surname).
    """
    cleaned = string.replace('.', '').replace(',', ' ')
    return cleaned.split(' ')[-1]
|
c3e30d069bcf6afccf7cc8730c2596b285b8a2c1
| 57,324
|
import textwrap
def catalog_file(tmpdir_factory):
    """Prepare a single-entry catalog that points to actual data on pangaea.de."""
    catalog_str = textwrap.dedent(
        """
        metadata:
          version: 1
        plugins:
          source:
            - module: intake_pangaeapy
        sources:
          M85_1_bottles:
            driver: pangaeapy
            description: |
              Kieke, Dagmar; Steinfeldt, Reiner (2015): Physical oceanography,
              CFC-11, and CFC-12 measured on water bottle samples during METEOR
              cruise M85/1. PANGAEA, https://doi.org/10.1594/PANGAEA.854070
            args:
              pangaea_doi: "10.1594/PANGAEA.854070"
        """
    )
    # Write the catalog into a fresh temporary directory and hand back its path.
    target = tmpdir_factory.mktemp("catalog").join("example_catalog.yaml")
    with open(target, mode="w") as handle:
        handle.write(catalog_str)
    return target
|
81a4e3b317e21c320275409e5049fc31810365ec
| 57,328
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.