content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def frame_msg(msg):
    """Wrap *msg* in a hash frame so the result spans five lines."""
    frame_template = "###\n#\n# {0}\n#\n###"
    return frame_template.format(msg)
def vl2set(vl):
    """Convert a VerletList object into a set of neighbour index pairs.

    Args:
        vl: object exposing ``n_atoms`` and ``verlet_list(i)``, the latter
            returning the neighbour indices of atom ``i``.

    Returns:
        set[tuple[int, int]]: each pair stored as ``(min, max)`` so a
        neighbour relation is counted only once.
    """
    pairs = set()
    for i in range(vl.n_atoms):
        # Removed an unused per-atom pair counter and a dead debug print.
        for j in vl.verlet_list(i):
            # Canonical ordering avoids storing both (i, j) and (j, i).
            pairs.add((i, j) if i < j else (j, i))
    return pairs
import yaml
from pathlib import Path
def oci_image(metadata_file: str, image_name: str) -> str:
    """Find the upstream source for a container image in metadata.yaml.

    Args:
        metadata_file: path of the metadata YAML file relative to the
            top-level charm directory.
        image_name: OCI container image name as defined in metadata.yaml.

    Returns:
        The upstream image source string.

    Raises:
        FileNotFoundError: if ``metadata_file`` does not exist.
        ValueError: if resources, the image, or its upstream source are
            missing from the metadata.
    """
    metadata = yaml.safe_load(Path(metadata_file).read_text())
    resources = metadata.get("resources", {})
    if not resources:
        raise ValueError("No resources found")
    image = resources.get(image_name, {})
    if not image:
        raise ValueError("{} image not found".format(image_name))
    source = image.get("upstream-source", "")
    if not source:
        raise ValueError("Upstream source not found")
    return source
def yolobox2label(box, info_img):
    """
    Map a box from the pre-processed (resized + padded) coordinate
    system back to the original image's coordinate system.

    Args:
        box (sequence): (y1, x1, y2, x2) in pre-processing coordinates.
            NOTE(review): the upstream docs described this as
            [yc, xc, w, h], but the code unpacks corner coordinates —
            confirm against callers.
        info_img (tuple): (h, w, nh, nw, dx, dy) where h, w are the
            original image shape, nh, nw the resized shape without
            padding, and dx, dy the pad offsets.

    Returns:
        list: [y1, x1, y2, x2] in original-image coordinates.
    """
    h, w, nh, nw, dx, dy = info_img
    top, left, bottom, right = box
    height = ((bottom - top) / nh) * h
    width = ((right - left) / nw) * w
    y_min = ((top - dy) / nh) * h
    x_min = ((left - dx) / nw) * w
    return [y_min, x_min, y_min + height, x_min + width]
from typing import Union
def binary_search(sorted_list: list, searched_item) -> Union[int, None]:
    """Locate *searched_item* in an ascending *sorted_list*.

    Args:
        sorted_list: list sorted in ascending order.
        searched_item: value to look for.

    Returns:
        The index of a matching element, or None when absent.
    """
    lo, hi = 0, len(sorted_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = sorted_list[mid]
        if candidate == searched_item:
            return mid
        if candidate < searched_item:
            lo = mid + 1
        else:
            hi = mid - 1
    return None
def get_db_tags(db, user_id):
    """Fetch all tag rows belonging to *user_id* from the tags table."""
    query = "SELECT tag FROM tags WHERE user_id = ?"
    cursor = db.execute(query, [user_id])
    return cursor.fetchall()
def shell_sort(li):
""" [list of int] => [list of int]
Shell sort: arranges the list of elements so that,
starting anywhere, considering every hth element
gives a sorted list. Such a list is said to be h-sorted.
Beginning with large values of h, this rearrangement allows
elements to move long distances in the original list,
reducing large amounts of disorder quickly, and leaving
less work for smaller h-sort steps to do.
Determining which values of h we should use is a continuing problem
in computer science. I'll be using the simple sequence of powers of two,
which seem to work best for me.
See https://en.wikipedia.org/wiki/Shellsort for more information.
"""
# calculating values of h (gaps), 1 is always the last gap (obviously)
gaps = []
k = 1
while 2 ** k < len(li):
gaps.append(2 ** k)
k += 1
# sorting by gaps, starting with the largest (last) gap
for gap in reversed(gaps):
# iterate through unsorted values as li[0:gap-1] is considered sorted
# by virtue of being the only value in our sorted list
for i in range(gap, len(li)):
# select value to be inserted
value = li[i]
# new counter variable that can be changed, so i
# variable stays unchanged
j = i
# check value against the element h spaces before
# if the element h spaces before is larger, we need to swap
while j >= gap and li[j - gap] >= value:
li[j] = li[j - gap]
# go back gap spaces to check value against element h
# space before, in case another swap is needed
j -= gap
# replace the original value
li[j] = value
return li | 30d9b30fa924ae40322f085236c364a4c60eae76 | 128,828 |
import asyncio
def _async_callback(coro):
    """Adapt coroutine function *coro* for use as a synchronous callback.

    The returned callable schedules ``coro(val)`` on the running event
    loop via ``asyncio.create_task`` and returns the created task.
    """
    def callback(val):
        return asyncio.create_task(coro(val))
    return callback
def find_sample(metric, suffix, labels):
    """Return the first sample of *metric* named ``metric.name + suffix``
    whose labels contain every key/value pair in *labels*, else None."""
    target_name = f"{metric.name}{suffix}"
    for candidate in metric.samples:
        if candidate.name != target_name:
            continue
        if all(candidate.labels.get(key) == value for key, value in labels.items()):
            return candidate
    return None
def is_vowel(letter: str) -> bool:
    """
    Tell whether *letter* is an English vowel (case-insensitive).

    Parameters:
        letter (str): a single character.

    Returns:
        bool: True for a/e/i/o/u in either case, False otherwise.
    """
    return letter.lower() in ("a", "e", "i", "o", "u")
def is_in_domain(i, j, m, n):
    """
    Return True when (i, j) is a valid index into an m x n grid.
    """
    return 0 <= i < m and 0 <= j < n
def is_empty(virtualizer, skipped=('version', 'id')):
    """
    Return True if the given Virtualizer object has no significant child.

    Children whose tag appears in *skipped* (by default the legacy
    'version' tag and the 'id' tag) are ignored.

    :param virtualizer: virtualizer object
    :type virtualizer: :class:`Virtualizer`
    :param skipped: non-significant child names
    :type skipped: tuple or list
    :return: True when only skipped children (or none at all) exist
    :rtype: bool
    """
    child = virtualizer.get_next()
    while child is not None:
        if child.get_tag() not in skipped:
            # Found a meaningful child -> not empty.
            return False
        child = child.get_next()
    return True
def _custom_formatargvalues(
        args, varargs, varkw, locals,
        formatarg=str,
        formatvarargs=lambda name: '*' + name,
        formatvarkw=lambda name: '**' + name,
        formatvalue=lambda value: '=' + repr(value)):
    """Render a function's arguments and their current values.

    Behaves like inspect.formatargvalues, except that results 40
    characters or longer are laid out with one argument per line.
    """
    specs = [formatarg(name) + formatvalue(locals[name]) for name in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    one_line = '(' + ', '.join(specs) + ')'
    if len(one_line) < 40:
        return one_line
    # Too wide: put each argument on its own line.
    return '(\n    ' + ',\n    '.join(specs) + '\n)'
from datetime import datetime
def days_between(d1, d2):
    """Absolute difference in whole days between two timestamps.

    Parameters
    ----------
    d1 : str
        First timestamp, formatted "%Y-%m-%d %H:%M:%S".
    d2 : str
        Second timestamp, same format.

    Returns
    -------
    int
        abs((d2 - d1).days) after parsing both strings.
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    start = datetime.strptime(d1, fmt)
    end = datetime.strptime(d2, fmt)
    return abs((end - start).days)
def get_output(response, more_out=False):
    """
    Build the dict of fields to display for a package.

    :param response: WoppResponse object providing the package info
    :param more_out: include the extended field set when True
    :return: dict containing output information
    """
    def joined(values):
        # Comma-join a list, or None when the list is empty/None.
        return ', '.join(values) if values else None

    result = {
        'name': response.name,
        'current_version': response.latest_version,
        'summary': response.summary,
        'homepage': response.homepage,
        'package_url': response.package_url,
        'author': response.author,
    }
    if more_out:
        result['author_email'] = response.author_email
        result['releases'] = joined(response.releases)
        result['project_urls'] = response.project_urls
        result['requires_python'] = response.requires_python
        result['license'] = response.license
        result['current_release_url'] = response.latest_release_url
        result['current_package_info'] = response.latest_pkg_urls
        result['dependencies'] = joined(response.dependencies)
    else:
        result['latest_releases'] = joined(response.latest_releases)
    return result
def list_to_int(str_list):
    """Convert each string in *str_list* to int, returning a new list."""
    return list(map(int, str_list))
from typing import List
from typing import Any
def dup(iterable: List[Any]) -> List[bool]:
    """Flag duplicated values in *iterable*.

    Returns a list of booleans where entry i is True when iterable[i]
    occurs more than once in the input.

    Uses one Counter pass (O(n)) instead of list.count per element
    (O(n^2)); falls back to the quadratic scan for unhashable elements.
    """
    from collections import Counter
    try:
        counts = Counter(iterable)
    except TypeError:
        # Unhashable elements (e.g. lists): keep the original behaviour.
        return [iterable.count(item) > 1 for item in iterable]
    return [counts[item] > 1 for item in iterable]
def dict_sample_list(a_dict, n_sample=4):
    """Sample dictionary entries with even spacing; returns printable lines.

    Args:
        a_dict: dictionary to sample.
        n_sample: target number of samples (the header always reports this
            number; the slice may yield fewer).

    Returns:
        list[str]: a header line followed by one formatted string per
        sampled key (first and last keys are never sampled).
    """
    data_len = len(a_dict)
    # Bug fix: for dicts with fewer than n_sample entries the original
    # step data_len // n_sample was 0, an illegal slice step. Clamp to 1.
    step = max(1, data_len // n_sample)
    header = ['Printing {} entries evenly sampled from {} combinations:\n'.format(n_sample, data_len)]
    sampled_keys = list(a_dict.keys())[1:-1:step]
    return header + ['** {} ** \n{}'.format(key, repr(a_dict[key])) for key in sampled_keys]
import torch
def Dist(sample, adv, ordr=float('inf')):
    """Batchwise norm of the difference between two tensor batches.

    sample: tensor of shape [batch, ...]
    adv:    tensor with the same shape as `sample`
    ordr:   norm order; must be in {1, 2, float('inf')}

    Returns a tensor of shape [batch] holding one distance per sample.
    """
    diff = sample - adv
    flattened = diff.view(diff.shape[0], -1)
    return torch.norm(flattened, ordr, 1)
import logging
def log_level(s):
    """Map a level name such as "info" to its numeric logging level.

    Raises ValueError for names the logging module does not define.
    """
    level = getattr(logging, s.upper(), None)
    if isinstance(level, int):
        return level
    raise ValueError("Invalid log level: {}".format(s))
def df_to_csv(df, filename):
    """
    Write *df* to "<filename>.csv" without the index column.

    Parameters:
        df: pandas dataframe
        filename: csv name (".csv" is appended)

    Returns:
        None (pandas returns None when a target path is given).
    """
    # Bug fix: the filename argument was ignored and a hard-coded path
    # was written instead.
    return df.to_csv(f'{filename}.csv', index=False)
def unique_elements(list_):
    """
    Return the distinct values of *list_*, keeping first-seen order.

    Membership is checked with `in` (equality), so unhashable elements
    also work — unlike list(set(list_)), which would additionally not
    preserve order.
    """
    seen = []
    for value in list_:
        if value not in seen:
            seen.append(value)
    return seen
def net_shortwave_radiation_daily(rs, albedo):
    """
    Daily net shortwave radiation: incoming radiation minus the fraction
    reflected by the surface.

    :param rs: daily shortwave radiation [MJ m-2 day-1]
    :param albedo: reflection coefficient in [0, 1]; 0.23 for the
        hypothetical grass reference crop [-]
    :return: daily net shortwave radiation reaching the earth [MJ m-2 day-1]
    """
    absorbed_fraction = 1.0 - albedo
    return absorbed_fraction * rs
import torch
def rel2abs_traj(rel_pose):
    """Accumulate a relative pose sequence into absolute poses.

    Args:
        rel_pose (torch.Tensor): relative homogeneous transforms,
            shape [N, 4, 4].

    Returns:
        torch.Tensor: absolute homogeneous transforms, shape [N, 4, 4];
        entry i equals rel_pose[0] @ ... @ rel_pose[i].
    """
    abs_pose = torch.zeros_like(rel_pose)
    running = torch.eye(4, dtype=rel_pose.dtype, device=rel_pose.device)
    for idx in range(rel_pose.shape[0]):
        running = running @ rel_pose[idx]
        abs_pose[idx] = running
    return abs_pose
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Return a triplet from *arr* summing to *target*, else (0, 0, 0).

    Note: sorts *arr* in place; uses the two-pointer technique.

    >>> triplet_sum2([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    >>> triplet_sum2([37, 9, 19, 50, 44], 65)
    (9, 19, 37)
    >>> arr = [6, 47, 27, 1, 15]
    >>> target = 11
    >>> triplet_sum2(arr, target)
    (0, 0, 0)
    """
    arr.sort()
    size = len(arr)
    for first in range(size - 1):
        lo, hi = first + 1, size - 1
        while lo < hi:
            total = arr[first] + arr[lo] + arr[hi]
            if total == target:
                return (arr[first], arr[lo], arr[hi])
            if total < target:
                lo += 1
            else:
                hi -= 1
    return (0, 0, 0)
def remove_top_right_line(ax):
    """
    Hide the top and right spines of *ax* and keep axis ticks on the
    bottom/left only (they are usually chartjunk elsewhere).

    Returns the same axes object.
    """
    for side in ('right', 'top'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    return ax
def field_initialise_tags(model, field, report=None):
    """
    Load any initial tags for the specified tag field.

    You will not normally need to call this directly — use the
    ``initialtags`` management command instead.

    Arguments:
        model   Model containing the field
        field   Field with initial tags to load
        report  Optional: a file handle for verbose progress output

    Returns True if tags were loaded, False if there was nothing to load.
    """
    if not field.tag_options.initial:
        return False
    if report:
        report.write("Loading initial tags for %s.%s.%s\n" % (
            model._meta.app_label,
            model.__name__,
            field.name,
        ))
    getattr(model, field.name).load_initial()
    return True
from itertools import product
def get_diff_slices(ndim):
    """
    Build the slice pairs needed to take all neighbor differences in
    *ndim* dimensions.

    For each non-central offset in {-1, 0, +1}^ndim, produces a tuple
    (slices, inverse_slices); only one of each offset/negated-offset
    pair is kept.
    """
    shifted = (slice(1, None), slice(None, None), slice(None, -1))
    pairs = []
    for offset in product((0, 1, 2), repeat=ndim):
        if all(axis == 1 for axis in offset):
            continue  # zero offset: the cell itself, not a neighbor
        forward = tuple(shifted[axis] for axis in offset)
        backward = tuple(shifted[2 - axis] for axis in offset)
        # Skip offsets whose negation was already recorded.
        if (backward, forward) not in pairs:
            pairs.append((forward, backward))
    return pairs
def _combine_to_memory(aggs: list):
    """Build a collector that appends every dataframe it is given to *aggs*."""
    def collect(df) -> None:
        aggs.append(df)
    return collect
def get_blob(self, path, commit):
    """
    Get the blob at *path* from the given commit.

    :param pathlib.Path path: path relative to the repo, leading to a file
    :param git.Commit commit: the commit to read the blob from
    :return: the corresponding blob, or None when the path is absent
    :rtype: git.Blob or None
    """
    key = path.as_posix()
    try:
        return commit.tree[key]
    except KeyError:
        return None
def is_string(value):
    """Heuristically decide whether *value* is a plain string.

    Returns False when the text parses as a number or reads as a boolean
    literal ("true"/"false"), True otherwise.
    """
    try:
        float(value)
    except ValueError:
        return value.lower() not in ["true", "false"]
    return False
from typing import Dict
from typing import Any
def prefix_dict(d: Dict[str, Any], prefix: str):
    """ Return a copy of *d* whose keys are prefixed with "<prefix>_". """
    result = {}
    for key, value in d.items():
        result[f"{prefix}_{key}"] = value
    return result
import base64
def b64str_to_bytes(b64str: str) -> bytes:
    """Decode a base64-encoded text string into raw bytes."""
    return base64.b64decode(b64str.encode("ascii"))
def parse_report_for_epoch_metrics(report_dict, metrics=["train_mIoU"]):
    """
    Collect per-epoch values of the requested metrics from a raw report.

    Arguments:
        report_dict: raw report data with an "epoch_metrics" mapping of
            epoch -> {metric: value}
        metrics: metric names to extract (the mutable default is kept
            for interface compatibility; it is never mutated here)

    Returns:
        result: dict of the form {metric: {"epoch": [], "value": []}}
    """
    result = {name: {"epoch": [], "value": []} for name in metrics}
    for epoch_key, metric_map in report_dict["epoch_metrics"].items():
        epoch_num = int(epoch_key)
        for name in metrics:
            result[name]["epoch"].append(epoch_num)
            result[name]["value"].append(float(metric_map[name]))
    return result
def __reverse_num(n: int):
    """
    Helper for check_palindrome_num().

    Return *n* with its decimal digits reversed (0 for non-positive n).

    :param n: int
    :return: int
    """
    reversed_value = 0
    while n > 0:
        n, digit = divmod(n, 10)
        reversed_value = reversed_value * 10 + digit
    return reversed_value
def get_ufa_repo_by_dir(filepath):
    """ Read the .ufarepo marker file under *filepath* and return the
    root repository name, or None when no marker file exists. """
    marker_path = "/".join(filepath.split('/') + ['.ufarepo'])
    try:
        with open(marker_path, "r") as marker:
            return marker.readline().strip()
    except FileNotFoundError:
        return None
from pathlib import Path
def enterprise_1_9_artifact() -> Path:
    """
    Path to a build artifact for DC/OS Enterprise 1.9.
    """
    artifact = Path('/tmp/dcos_generate_config_1_9.ee.sh')
    return artifact
def calcEMA(df, period_len, col="close"):
    """
    Calculates Exponential Moving Average (EMA).
    Read about EMA: https://www.investopedia.com/terms/e/ema.asp

    Args:
        df : pandas.DataFrame()
            dataframe of historical ticker data
        period_len : int
            length of moving average periods
        col: str
            which col to use for calculation ("MACD" gets a 26-row warmup,
            matching the rows MACD itself needs before it is defined)

    Returns:
        numpy.ndarray
            EMA per row (None/NaN for the warmup rows), aligned with df.

    Note: the original duplicated the whole computation for the MACD and
    non-MACD cases; the two branches differed only by the 26-row warmup
    offset, so they are unified here.
    """
    warmup = 26 if col == "MACD" else 0
    # Seed value: simple moving average of the first usable rows.
    ma = df[col].head(warmup + period_len).mean()
    prev_ema = None

    def __calc(row):
        nonlocal prev_ema
        if period_len + warmup >= row.name + 1:
            # Not enough history yet to produce an EMA for this row.
            return None
        # First real EMA is seeded from the SMA, then self-propagates.
        seed = ma if prev_ema is None else prev_ema
        ema_today = (row[col] * ((2 / (period_len + 1)))) + (
            seed * (1 - (2 / (period_len + 1)))
        )
        prev_ema = ema_today
        return ema_today

    copy_df = df.copy().reset_index()
    arr = copy_df.apply(__calc, axis=1).values
    return arr
def calculate_indentation_levels(leading_spaces: str) -> int:
    """Number of 4-space indentation levels in *leading_spaces*.

    '' maps to level 0, '    ' to 1, and so on.

    Args:
        leading_spaces:
            A string of only whitespace whose length is a multiple of 4.
    """
    assert leading_spaces.strip() == ''
    assert len(leading_spaces) % 4 == 0
    return len(leading_spaces) // 4
import torch
def norm_gradient_squared(outputs, inputs, sum_over_points=True):
    """Squared norm of the gradient of *outputs* w.r.t. *inputs*, per example.

    Args:
        outputs (torch.Tensor): shape (batch_size, 1); usually the
            discriminator output on real data.
        inputs (torch.Tensor): shape (batch_size, num_points, dim) where
            dim covers coordinates and/or features.
        sum_over_points (bool): sum over the num_points axis when True,
            otherwise take the mean over it.

    Returns:
        torch.Tensor of shape (batch_size,).

    Notes:
        Inspired by
        https://github.com/LMescheder/GAN_stability/blob/master/gan_training/train.py
    """
    # Unpacking doubles as a check that inputs is 3-D.
    batch_size, num_points, _ = inputs.shape
    grads = torch.autograd.grad(
        outputs=outputs.sum(), inputs=inputs,
        create_graph=True, retain_graph=True, only_inputs=True,
    )[0]
    # Sum squared gradients over the feature axis -> (batch, num_points).
    per_point = grads.pow(2).sum(dim=2)
    reduce = per_point.sum if sum_over_points else per_point.mean
    return reduce(dim=1)
def isNumber(str_val):
    """
    Test whether the input string parses as a number (int, float, or
    complex).
    """
    for parser in (float, complex):
        try:
            parser(str_val)
            return True
        except ValueError:
            continue
    return False
def _format_time_zone_string(time_zone, date_time, format_string):
    """
    Format *date_time* as it reads in *time_zone*.

    :param time_zone: pytz time zone object (any tzinfo works)
    :param date_time: datetime object to convert
    :param format_string: strftime format codes; see
        https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
    """
    localized = date_time.astimezone(time_zone)
    return localized.strftime(format_string)
import time
def run_exchange(
        exchange,
        order_q,
        trader_qs,
        kill_q,
        start_event,
        start_time,
        sess_length,
        virtual_end,
        process_verbose):
    """
    Run the exchange's main processing loop for one trading day.

    :param exchange: Exchange object
    :param order_q: Queue on which new orders arrive
    :param trader_qs: Queues by which traders receive updates from the exchange
    :param kill_q: Queue of orders to be removed from the exchange
    :param start_event: Event indicating whether the exchange is active
    :param start_time: float, session start time (seconds since 1970)
    :param sess_length: int, real-time length of the session in seconds
    :param virtual_end: number of virtual seconds the trading day lasts
    :param process_verbose: print extra order-processing info to console
    :return: 0 on completion of the trading day
    """
    # Tracks which customer order ids have already traded.
    completed_coid = {}
    start_event.wait()
    # Bug fix: Event.isSet() was removed in Python 3.12; is_set() is the
    # long-standing equivalent spelling.
    while start_event.is_set():
        # Scale elapsed wall-clock time into virtual session time.
        virtual_time = (time.time() - start_time) * (virtual_end / sess_length)
        while kill_q.empty() is False:
            exchange.del_order(virtual_time, kill_q.get())
        order = order_q.get()
        if order.coid in completed_coid:
            if completed_coid[order.coid]:
                # This customer order already traded: skip the duplicate.
                continue
        else:
            completed_coid[order.coid] = False
        (trade, lob) = exchange.process_order2(virtual_time, order, process_verbose)
        if trade is not None:
            # Mark both sides of the trade as completed and notify traders.
            completed_coid[order.coid] = True
            completed_coid[trade['counter']] = True
            for q in trader_qs:
                q.put([trade, order, lob])
    return 0
def check_index(index, valid_min, valid_max):
    """Wrap *index* back into range by one period of *valid_max*.

    Values at or beyond valid_max have valid_max subtracted; values below
    valid_min have valid_max added (a single wrap in each direction).
    """
    if index >= valid_max:
        index -= valid_max
    if index < valid_min:
        index += valid_max
    return index
def purge_block(data_blocks):
    """Remove all blocks with zero users from *data_blocks*.

    Bug fix: the original removed elements from the collection while
    iterating it directly, which skips the element following every
    removal; iterating a snapshot avoids that.

    Returns the number of blocks removed.
    """
    counter = 0
    for block in list(data_blocks):
        if block.users == 0:
            data_blocks.remove(block)
            counter += 1
    return counter
import math
def rotate_point(point, angle, origin=(0, 0)):
    """Rotate *point* counterclockwise by *angle* (radians) about *origin*."""
    origin_x, origin_y = origin
    point_x, point_y = point
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    dx = point_x - origin_x
    dy = point_y - origin_y
    return (origin_x + cos_a * dx - sin_a * dy,
            origin_y + sin_a * dx + cos_a * dy)
def prettify_string_array(array, max_length):
    """Render a list of strings as human-readable text.

    Entries are joined with commas and a final "and"; once the built-up
    text exceeds *max_length*, the function stops and reports how many
    entries remain (the "+1" remainder count of the original is kept).
    """
    text = ''
    for position, entry in enumerate(array):
        if len(text) > max_length:
            text += ', and %s more.' % (position + 1)
            break
        if position == 0:
            text += entry
        elif position < len(array) - 1:
            text += ', %s' % entry
        else:
            text += ' and %s.' % entry
    return text
def save_linkage(linkage_matrix, filename):
    """Save the linkage matrix to file.

    Each row of the matrix is written as one formatted line.

    :param linkage_matrix: rows of (cluster_a, cluster_b, distance, size)
    :type linkage_matrix: numpy.ndarray
    :param filename: file name
    :type filename: basestring
    :return: success of the saving process
    :rtype: bool

    Improvements: the file is now closed deterministically via ``with``,
    and only OSError (I/O failure) is swallowed instead of a bare except
    that hid every other bug.
    """
    lines = []
    for row in linkage_matrix:
        lines.append("{0:6d} {1:6d} {2:7.3f} {3:7d}\n".format(
            int(row[0]), int(row[1]), row[2], int(row[3])))
    try:
        with open(filename, "w") as out_file:
            out_file.write("".join(lines))
        return True
    except OSError:
        return False
def get_variant_set(prim, variant_set_name):
    """
    Look up a variant set by name on the given prim.

    :param prim: Usd.Prim
    :param variant_set_name: str
    :return: the variant set, or None when the prim has no set of that name
    """
    variant_sets = prim.GetVariantSets()
    if variant_set_name not in variant_sets.GetNames():
        return None
    return variant_sets.GetVariantSet(variant_set_name)
def short_int_name(long_name):
    """
    Shorten an interface name for easier reading.

    :param long_name: the long interface name (any case)
    :return: the shortened, lower-cased interface name, or *long_name*
        unchanged when no known prefix matches

    Bug fix: the original returned *long_name* from the loop's else
    branch on the very first non-matching pair, so only
    'fortygigabitethernet' was ever tested; all pairs are checked now.
    """
    replace_pairs = [
        # Longest names first so e.g. 'tengigabitethernet' is matched
        # before its 'gigabitethernet' suffix.
        ('fortygigabitethernet', 'Fo'),
        ('tengigabitethernet', 'Te'),
        ('gigabitethernet', 'Gi'),
        ('fastethernet', 'Fa'),
        ('ethernet', 'Eth'),
        ('port-channel', 'Po'),
        ('loopback', "Lo")
    ]
    lower_str = long_name.lower()
    for prefix, abbrev in replace_pairs:
        if prefix in lower_str:
            return lower_str.replace(prefix, abbrev)
    return long_name
import io
import hashlib
def get_sha256(file_obj, block_size=io.DEFAULT_BUFFER_SIZE):
    """Hex sha256 digest of the contents of *file_obj*.

    `file_obj` must be an open binary file descriptor; the caller is
    responsible for closing it.
    """
    digest = hashlib.sha256()
    while True:
        chunk = file_obj.read(block_size)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
def real_return(nominal_return, inflation_rate):
    """Inflation-adjusted (real) return.

    Parameters
    ----------
    nominal_return : float
        Nominal return.
    inflation_rate : float
        Inflation rate over the same period.

    Returns
    -------
    float
        (1 + nominal_return) / (1 + inflation_rate) - 1.
    """
    growth = 1 + nominal_return
    deflator = 1 + inflation_rate
    return growth / deflator - 1
def format_records(records):
    """
    Render (date, value) records as newline-separated, tab-delimited text.
    """
    lines = ['\t'.join([day.isoformat(), str(value)]) for (day, value) in records]
    return '\n'.join(lines)
import random
def ChangeNodeStart(nodes):
    """Return *nodes* cycled so the list begins at a random position."""
    pivot = random.randrange(0, len(nodes))
    rotated = nodes[pivot:]
    rotated.extend(nodes[:pivot])
    return rotated
def to_str(obj):
    """ Return str(obj) — the string representation of *obj*. """
    converted = str(obj)
    return converted
def _dims_to_strings(dims):
    """
    Coerce every dim (whether already a string or not) to its str form.
    """
    return list(map(str, dims))
def nmea_to_degrees(data):
    """ Convert an NMEA ddmm.mmmm coordinate string to decimal degrees """
    raw = float(data)
    degrees = int(raw / 100)
    minutes = raw - degrees * 100
    return degrees + minutes / 60
def splitOneListIntoTwo(inputList):
    """
    Split a list whose entries each hold two elements into two parallel
    lists: one of the first components and one of the second.
    """
    firsts = []
    seconds = []
    for entry in inputList:
        firsts.append(entry[0])
        seconds.append(entry[1])
    return firsts, seconds
def dict_contains(keys: tuple, check_dict: dict) -> bool:
    """
    Whether *check_dict* holds every key in *keys* (True for empty keys).
    """
    return all(key in check_dict for key in keys)
def create_party(roles: list,
                 party_id,
                 mailing_addr,
                 delivery_addr):
    """Create a party object with custom roles and addresses.

    Args:
        roles: role-type names; each becomes a role entry dated 2018-01-01.
        party_id: id assigned to the officer.
        mailing_addr: mailing address dict, or falsy to leave unset.
        delivery_addr: delivery address dict, or falsy to leave unset.

    Bug fix: the delivery address was guarded by ``if mailing_addr`` —
    it is now set based on ``delivery_addr`` itself.
    """
    party = {
        'officer': {
            'id': party_id,
            'firstName': 'Joe',
            'lastName': 'Swanson',
            'middleName': 'P',
            'email': 'joe@email.com',
            'organizationName': '',
            'partyType': 'person'
        },
        'mailingAddress': None,
        'deliveryAddress': None,
        'roles': []
    }
    for role in roles:
        party['roles'].append({
            'roleType': role,
            'appointmentDate': '2018-01-01'
        })
    if mailing_addr:
        party['mailingAddress'] = mailing_addr
    if delivery_addr:
        party['deliveryAddress'] = delivery_addr
    return party
def get_flags(app):
    """Feature-flag dict from the app config ("FLAGS" key, default {})."""
    flags = app.config.get("FLAGS", {})
    return flags
def compare_peaks(peak1, peak2):
    """Order two peaks by their start coordinate (element 0).

    Args:
        peak1: first peak; peak1[0] is its start position
        peak2: second peak; peak2[0] is its start position

    Returns:
        int: -1 when peak1 starts earlier, 0 when equal, 1 when later
    """
    start1 = peak1[0]
    start2 = peak2[0]
    if start1 > start2:
        return 1
    if start1 < start2:
        return -1
    return 0
def is_int(str_val: str) -> bool:
    """ Whether *str_val* consists of one or more ASCII digits only.

    Note: neither a sign nor whitespace is accepted, so "-1" is False.

    Parameters
    ----------
    str_val : str
        A string to be checked.

    Returns
    -------
    bool
        True when non-empty and every character is in 0-9.
    """
    if not str_val:
        return False
    return all(c in "0123456789" for c in str_val)
import math
def _sine_sample(amp, freq, rate, i) -> float:
    """
    Sample *i* of a sine wave with amplitude *amp* and frequency *freq*,
    taken at *rate* samples per second.

    :param float amp: amplitude of the sine wave
    :param float freq: frequency of the sine wave
    :param int rate: sampling rate
    :param int i: index of the sample to pull
    :return float: amp * sin(2*pi*freq * i/rate)
    """
    phase = 2.0 * math.pi * float(freq) * (float(i) / float(rate))
    return float(amp) * math.sin(phase)
import torch
def get_dihedral_torch(c1, c2, c3, c4, c5):
    """ Dihedral angle in radians via the atan2 formulation from:
    https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics

    Takes five points and works on the four bond vectors between them.
    """
    u1, u2, u3, u4 = c2 - c1, c3 - c2, c4 - c3, c5 - c4
    y = torch.dot(torch.norm(u2) * u1, torch.cross(u3, u4))
    x = torch.dot(torch.cross(u1, u2), torch.cross(u3, u4))
    return torch.atan2(y, x)
def get_labels(G):
    """Collect the distinct node labels across a collection of graphs.

    Args:
        G: iterable of graphs exposing number_of_nodes() and a
           nodes mapping with per-node "labels" lists
    Returns:
        set: the first label of every node of every graph
    """
    return {
        g.nodes[i]["labels"][0]
        for g in G
        for i in range(g.number_of_nodes())
    }
import math
def round_up(num):
    """ Round *num* to the nearest int; halves round away from zero. """
    bias = math.copysign(0.5, num)
    return int(num + bias)
def unicode_open(*args, **kwargs):
    """
    open() that defaults to UTF-8 encoding unless an 'encoding' keyword
    argument is passed. Returns the file object.
    """
    kwargs.setdefault("encoding", "utf-8")
    return open(*args, **kwargs)
def RemoveRedundantPaths(childrenList, nodeList, partialCostTable):
    """
    Prune redundant (non-optimal) paths from a set of expanded children.

    For each child:
      * If its station was already reached but this child's g is LOWER,
        the partial cost table (TCP) is updated and every pending node
        whose path passes through that station is dropped from nodeList
        (those paths went through the more expensive route).
      * If its station was already reached with a LOWER cost, the child
        itself is redundant and is discarded.
      * If its station has not been reached yet, it is registered in the
        table.
    Children whose g EQUALS the recorded cost fall through all branches
    and are kept without touching the table.

    :params
     - childrenList: LIST of NODES, set of childs that should be studied if
       they contain redundant paths or not.
     - nodeList: LIST of NODES to be visited
     - partialCostTable: DICTIONARY of the minimum g to get each key
       (station id) from the origin Node
    :returns
     - childrenList: LIST of NODES, set of childs without redundant paths.
     - nodeList: LIST of NODES to be visited, updated (without redundant paths)
     - partialCostTable: DICTIONARY of the minimum g to get each key
       (station id) from the origin Node (updated)
    """
    def gen_parents(node):
        # Yield ``node`` and every ancestor reachable via .father, i.e.
        # every node on the path from the origin up to ``node``.
        while node:
            yield node
            node = node.father
    newChildrenList=[]
    for child in childrenList:
        if child.station.id in partialCostTable and child.g<partialCostTable[child.station.id]:
            # Cheaper route found: record it, then drop every pending node
            # whose path goes through this station (their paths used the
            # old, more expensive route).
            partialCostTable[child.station.id]=child.g
            nodeList=list(filter(lambda x: child.station.id not in [y.station.id for y in gen_parents(x)], nodeList))
        elif child.station.id in partialCostTable and child.g>partialCostTable[child.station.id]:
            # A strictly cheaper route to this station already exists:
            # this child is redundant, skip it.
            continue
        elif child.station.id not in partialCostTable:
            # First time this station is reached: register its cost.
            partialCostTable[child.station.id]=child.g
        # NOTE: equal-cost children fall through and are kept as-is.
        newChildrenList.append(child)
    return newChildrenList, nodeList, partialCostTable
def overlap(e1_start: int, e1_end: int, e2_start: int, e2_end: int) -> bool:
    """
    Check if two spans overlap with each other.

    Two spans overlap exactly when both are non-empty and each one starts
    strictly before the other one ends; this single test covers all four
    cases (mutual containment and partial overlap in either direction).

    Args:
        e1_start (int): span 1 begin offset
        e1_end (int): span 1 end offset
        e2_start (int): span 2 begin offset
        e2_end (int): span 2 end offset

    Returns:
        bool: True if they overlap, False otherwise
    """
    both_nonempty = e1_start < e1_end and e2_start < e2_end
    return both_nonempty and e1_start < e2_end and e2_start < e1_end
def count_idle_cores(slots):
    """
    Total the "Cpus" column of the idle-slots dataframe.

    Returns 0 when the dataframe is empty.
    """
    if slots.empty:
        return 0
    return slots["Cpus"].sum()
def sorted_maybe_numeric(x):
    """
    Sort x with numeric semantics when every key is a nonnegative integer
    string; fall back to standard lexicographic string sorting otherwise.
    """
    if all(map(str.isdigit, x)):
        return sorted(x, key=int)
    return sorted(x)
def _shell_escape(cmd):
"""escape quotes, backticks and dollar signs"""
for char in ('"', '$', '`'):
cmd = cmd.replace(char, '\%s' % char)
return cmd | f325f1e088a69f893d317b2e4b9f6167e9019f0e | 129,054 |
import base64
import gzip
def compress(data):
    """
    Gzip-compress a string or bytes value and return its Base64 form.

    :param data: string or bytes
    :return: Base64 (bytes) representation of compressed data
    :raises RuntimeError: if data is neither str nor bytes
    """
    if isinstance(data, str):
        source = data.encode('utf-8')
    elif isinstance(data, bytes):
        source = data
    else:
        raise RuntimeError("Compression is only supported for strings and bytes")
    return base64.b64encode(gzip.compress(source))
def is_grpc_service_dir(files):
    """Tell whether a directory hosts a gRPC service.

    Args:
        files (list): The 'files' output of os.walk().

    Returns:
        Boolean: True if the '.grpc_service' marker file is present,
        otherwise False.
    """
    marker = '.grpc_service'
    return marker in files
def get_number_of_words(words):
    """
    Count how many words the text contains.
    """
    return len(words)
import itertools
def peek_first_item(gen):
    """
    Peek at the first item of a generator without consuming it.

    :param gen: generator
    :return: (first item or None, generator still yielding every original item)
    """
    try:
        item = next(gen)
    except StopIteration:
        # Exhausted input: return it untouched.  The previous version
        # chained the None placeholder back in, so an empty generator
        # came back yielding a spurious [None].
        return None, gen
    return item, itertools.chain([item], gen)
def format_exception_default(exc):
    """Default exception formatter: treat args[0] as a %-format template
    applied to the remaining args."""
    template = exc.args[0]
    return template % exc.args[1:]
import torch
def get_ddpm_schedule(t):
    """Returns log SNRs for the noise schedule from the DDPM paper."""
    # expm1 keeps precision near t = 0; the log-SNR is its negated log.
    snr_reciprocal = torch.expm1(1e-4 + 10 * t ** 2)
    return -snr_reciprocal.log()
import csv
from io import StringIO
def create_list(string):
    """
    Creates a list from a string of the form [A, B]
    :param string: string to convert to list
    :return: list
    """
    # Strip the surrounding brackets, then let the csv module handle
    # quoting of individual entries.
    inner = string[1:-1]
    rows = csv.reader(StringIO(inner), delimiter=',', quotechar='\"')
    result = []
    for row in rows:
        result.extend(entry.strip() for entry in row)
    return result
def _get_id_token(request):
"""Returns the ID token authorizing a user, or None if missing.
Oppia uses the OAuth 2.0's Bearer authentication scheme to send ID Tokens.
Bearer authentication (a.k.a. token authentication) is an HTTP
authentication scheme based on "bearer tokens", an encrypted JWT generated
by a trusted identity provider in response to login requests.
The name "Bearer authentication" can be understood as: "give access to the
bearer of this token." These tokens _must_ be sent in the `Authorization`
header of HTTP requests, and _must_ have the format: `Bearer <token>`.
Learn more about:
HTTP authentication schemes:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication
OAuth 2.0 Bearer authentication scheme:
https://oauth.net/2/bearer-tokens/
OpenID Connect 1.0 ID Tokens:
https://openid.net/specs/openid-connect-core-1_0.html#IDToken
Args:
request: webapp2.Request. The HTTP request to inspect.
Returns:
str|None. The ID Token of the request, if present, otherwise None.
"""
scheme, _, token = request.headers.get('Authorization', '').partition(' ')
return token if scheme == 'Bearer' else None | 5d74a8c940a6d9d111535c79e24b282ee17009c0 | 129,084 |
def binary_search(arr, target):
    """
    ATTENTION: THE PROVIDED ARRAY MUST BE SORTED!

    Searches for an item using the binary search algorithm.

    Iterative implementation: the previous version recursed on slices,
    copying O(n) elements per level and defeating the O(log n) bound.

    :param arr: sorted sequence to search
    :param target: value to look for
    :return: True if target is present, otherwise False
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] == target:
            return True
        if arr[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
def _GetStageName(name_enum):
"""Converts NameValueValuesEnum into human-readable text."""
return str(name_enum).replace('_', ' ').title() | cacb22ac15a416be9378e168f85adb36727bc91f | 129,088 |
def auto_table_args(cls, **extra_kwargs):
    """Merge SQLAlchemy ``__table_args__`` values.
    This is useful when using mixins to compose model classes if the
    mixins need to define custom ``__table_args__``. Since defining
    them directly they can define ``__auto_table_args`` classproperties
    which will then merged in the final model class using the regular
    table args attribute::
        @declared_attr
        def __table_args__(cls):
            return auto_table_args(cls)
    :param cls: A class that has one or more `__auto_table_args`
                classproperties (usually from mixins)
    :param extra_kwargs: Additional keyword arguments that will be
                         added after merging the table args.
                         This is mostly for convenience so you can
                         quickly specify e.g. a schema.
    :return: A value suitable for ``__table_args__``.
    """
    posargs = []
    kwargs = {}
    # Private ``__auto_table_args`` attributes are name-mangled to
    # ``_SomeMixin__auto_table_args``, so match on the suffix to pick up
    # every contributing mixin.  NOTE(review): dir() returns names in
    # alphabetical order, so if several mixins set the same kwarg the
    # winner is determined by mixin name, not MRO — presumably acceptable
    # here; verify if kwarg collisions are possible.
    for attr in dir(cls):
        if not attr.endswith('__auto_table_args'):
            continue
        value = getattr(cls, attr)
        if not value:
            continue
        if isinstance(value, dict):
            # Keyword-only contribution.
            kwargs.update(value)
        elif isinstance(value, tuple):
            if isinstance(value[-1], dict):
                # Standard SQLAlchemy shape: positional args followed by
                # a trailing kwargs dict.
                posargs.extend(value[:-1])
                kwargs.update(value[-1])
            else:
                posargs.extend(value)
        else:  # pragma: no cover
            raise ValueError(f'Unexpected tableargs: {value}')
    # Explicit extras always win over mixin-provided kwargs.
    kwargs.update(extra_kwargs)
    # Emit the smallest valid ``__table_args__`` shape: tuple + dict,
    # dict only, or tuple only.
    if posargs and kwargs:
        return tuple(posargs) + (kwargs,)
    elif kwargs:
        return kwargs
    else:
        return tuple(posargs)
def limit(x, y, d, nx, ny):
    """ limit x,y values to edge of canvas.

    Each clamp also overwrites the direction code for the edge that was
    hit (left=0, right=2, top=3, bottom=1); the y checks run last, so a
    corner hit keeps the y edge's code.
    """
    if x < 0:
        x = 0
        d = 0
    if x > nx - 1:
        x = nx - 1
        d = 2
    if y < 0:
        y = 0
        d = 3
    if y > ny - 1:
        y = ny - 1
        d = 1
    return x, y, d
def is_rule(line: str, starting_chr: str = '*', omit: str = 'NOTE') -> bool:
    """Return True when line starts with starting_chr and does not contain
    the omitted text (case-sensitive) anywhere.

    Replaces the redundant ``True if ... else False`` wrapper and the
    C-style ``find(...) == -1`` membership test with direct booleans.
    """
    return line.startswith(starting_chr) and omit not in line
import ssl
import socket
def check_http2_support(url: str) -> bool:
    """
    Checks if a host supports the HTTP/2 protocol via TLS ALPN negotiation.

    :param url: hostname to probe (port 443 is assumed)
    :return: True if the server negotiates "h2", otherwise False
    """
    context = ssl.create_default_context()
    context.set_alpn_protocols(['h2', 'spdy/3', 'http/1.1'])
    # Context managers guarantee both sockets are closed; the previous
    # version leaked the plain and the TLS socket on every call.
    with socket.create_connection((url, 443)) as sock:
        with context.wrap_socket(sock, server_hostname=url) as secure_sock:
            return secure_sock.selected_alpn_protocol() == "h2"
def find_largest_number(numbers_list):
    """
    Return the largest number in a non-empty list of integers.

    Uses the builtin ``max`` (a single O(n) pass) instead of sorting the
    whole list (O(n log n)) just to read its last element.

    :raises ValueError: if the list is empty
    """
    return max(numbers_list)
def create_notion_db_schema(db_json, relevant_properties):
    """
    Build a schema of the Notion database that was read.

    :param db_json (json): json object obtained by calling Notion's API
    :param relevant_properties (list): names of the relevant properties
    :return db_schema (dictionary): maps each relevant property to a dict
        holding its "data_type"
    """
    ## Use the first entry as a representative sample of the properties.
    sample_entry = db_json["results"][0]["properties"]
    ## Keep only the relevant properties, recording each one's data type.
    db_schema = {}
    for name, prop in sample_entry.items():
        if name in relevant_properties:
            db_schema[name] = {"data_type": prop["type"]}
    return db_schema
# DNS RCODE values mapped to their mnemonic names.
_RESPONSE_CODES = {
    0: 'NoError', 1: 'FormErr', 2: 'ServFail', 3: 'NXDomain', 4: 'NotImp',
    5: 'Refused', 6: 'YXDomain', 7: 'YXRRSet', 8: 'NXRRSet', 9: 'NotAuth',
    10: 'NotZone', 11: 'BADVERS', 12: 'BADSIG', 13: 'BADKEY', 14: 'BADTIME',
}


def get_response_code(key):
    """
    Translates numerical key into the string value for dns response code.

    :param key: Numerical value to be translated
    :return: Translated response code, or 'Other' for unknown values
    """
    # The lookup table is built once at import time instead of being
    # re-created as a literal on every call.
    return _RESPONSE_CODES.get(key, 'Other')
def insert_term(c, term):
    """Inserts the term into the database and returns the ID.

    The INSERT is a no-op when the term already exists (OR IGNORE), so
    the following SELECT always yields the row's id either way.
    """
    insert_sql = 'INSERT OR IGNORE INTO search_term (term) VALUES (?);'
    select_sql = 'SELECT id FROM search_term WHERE term=?;'
    c.execute(insert_sql, (term,))
    c.execute(select_sql, (term,))
    row = c.fetchone()
    return row[0]
def ports_from_plones(plones):
    """ Given playbook_plones, find all the ports in use in virtual hosts.
        Return sorted.
    """
    found = set()
    for plone in plones:
        for vhost in plone['webserver_virtualhosts']:
            port = vhost.get('port')
            if port is None:
                # No explicit port: derive the default from the protocol.
                protocol = vhost.get('protocol', 'http')
                port = 443 if protocol == 'https' else 80
            else:
                port = int(port)
            found.add(port)
    return sorted(found)
import importlib
def get_driver(driver_class):
    """
    Get Python class, implementing device driver, by its name.

    :param str driver_class: path to a class, e.g. ``ducky.devices.rtc.RTC``.
    :returns: driver class.
    """
    # Split "pkg.module.Class" into module path and class name.
    module_path, _, class_name = driver_class.rpartition('.')
    driver_module = importlib.import_module(module_path)
    return getattr(driver_module, class_name)
def _check_return_value(ret):
"""
Helper function to check if the 'result' key of the return value has been
properly set. This is to detect unexpected code-paths that would otherwise
return a 'success'-y value but not actually be succesful.
:param dict ret: The returned value of a state function.
"""
if ret["result"] == "oops":
ret["result"] = False
ret["comment"].append(
"An internal error has occurred: The result value was "
"not properly changed."
)
return ret | 788101b9fd6d8825a72156515584693eb3a041c3 | 129,132 |
def _rho_f(rhou):
"""Computes the flux of the continuity equation."""
return rhou | aac3f1bef3714615570035bef5f56bb65a1aabd4 | 129,133 |
def validate_graph_colors(graph):
    """
    Validates that each graph node has a color distinct from the colors of
    its neighbors.

    The leftover debug ``print`` of every node's neighbor colors was
    removed; a validation predicate should not write to stdout.

    :param graph: the graph to examine (iterable of nodes exposing
        ``color`` and ``neighbors`` attributes)
    :return: boolean indicating whether the graph nodes have valid colors
    """
    for graph_node in graph:
        if any(neighbor.color == graph_node.color for neighbor in graph_node.neighbors):
            return False
    return True
def max_or_0(it):
    """Return the maximum of *it*, or 0 when the iterable is empty.

    Uses ``max(..., default=0)`` so the iterable is streamed instead of
    being materialized into a throwaway list.

    >>> max_or_0([])
    0
    >>> max_or_0(iter([]))
    0
    >>> max_or_0(iter([-10, -2, -11]))
    -2
    """
    return max(it, default=0)
import re
def without_domain(host, domain):
    """
    Remove a DNS domain suffix (and its separating dot) from a host name.

    The previous regex version stripped only the domain text, leaving a
    trailing dot ('www.google.com' -> 'www.'), which contradicted this
    docstring's own example. It also mangled hosts that merely end with
    the domain text without being subdomains (e.g. 'foogoogle.com');
    those are now returned unchanged.

    Args:
        host (str): hostname
        domain (str): dns domain

    Returns:
        host (str): hostname without domain

    Examples:
        >>> without_domain('www.google.com', 'google.com')
        'www'
    """
    if host == domain:
        return ''
    suffix = '.' + domain
    if host.endswith(suffix):
        return host[:-len(suffix)]
    return host
def print_first_word(words):
    """Prints the first word after popping it off the list (mutates words)."""
    first = words.pop(0)
    print(first)
import csv
def csv_reader(file_path):
    """
    Open file_path and return (file object, csv reader over it).

    The file object is returned alongside the reader so the caller can
    close it when done.
    """
    file_obj = open(file_path, "r")
    return file_obj, csv.reader(file_obj)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.