| content (string, 35–416k chars) | sha1 (string, 40 hex chars) | id (int64, 0–710k) |
|---|---|---|
def _get_docks_available(sta):
    """Look up the dock count in a GBFS station-status record.

    :param sta: dict-like station status blob (one entry of a GBFS feed)
    :return: value of ``num_docks_available`` (KeyError if absent)
    """
    docks = sta['num_docks_available']
    return docks
def istext(s_input):
    """Report whether *s_input* can be treated as text, i.e. is not ``bytes``.

    (Original docstring, translated from Chinese: "since we want to know
    whether this content can serve as a JSON value, why not just try it?")

    :param s_input: any object
    :return: False for ``bytes`` instances, True otherwise
    """
    is_binary = isinstance(s_input, bytes)
    return not is_binary
def create_output_filenames(filename, DID, version = '01'):
    """Derive the seven L2 output FITS filenames from an L1 input filename.

    The input must look like ``...solo_...L1...ilam..V<nn>...``; 'L1' is
    replaced by 'L2', the version suffix is rebuilt as ``V<version>_<DID>.fits``
    and 'ilam' is swapped for each product name.

    :param filename: L1 input filename (must contain 'solo_')
    :param DID: data identifier appended before the extension
    :param version: two-digit version string, default '01'
    :return: tuple (stokes, icnt, bmag, bazi, binc, blos, vlos) of filenames
    :raises KeyError: if the filename does not have the expected structure
    """
    try:
        file_start = 'solo_' + filename.split('solo_')[1]
        L2_str = file_start.replace('L1', 'L2')
        versioned = L2_str.split('V')[0] + 'V' + version + '_' + DID + '.fits'
        products = ('stokes', 'icnt', 'bmag', 'bazi', 'binc', 'blos', 'vlos')
        return tuple(versioned.replace('ilam', prod) for prod in products)
    except Exception:
        # BUG FIX: the original printed a literal "{file_path}" placeholder
        # (missing f-prefix, and no such variable existed) and raised a bare
        # KeyError class with no message.
        print(f"The input file: {filename} does not contain 'L1'")
        raise KeyError(filename)
import math
def dist(a, b):
    """Return the Euclidean distance between points *a* and *b*.

    Iterates over ``len(a)`` dimensions, so *b* must be at least as long as *a*.
    """
    squared_deltas = [(a[i] - b[i]) * (a[i] - b[i]) for i in range(len(a))]
    return math.sqrt(sum(squared_deltas))
def __calc_year(entered_year: int, beginning_year: int) -> int:
    """Return the 1-based year number of *entered_year* in a programme that
    started in *beginning_year* (same year -> 1, third year -> 3).

    BUG FIX (docs only): the old docstring claimed a zero-based result
    ("0 for first year") while the code has always returned 1 for the
    first year; the code is unchanged and the docstring now matches it.
    """
    return entered_year - beginning_year + 1
def atom(text):
    """Parse *text* into an int (whole-number values), a float, or return the
    string unchanged when it is not numeric.

    BUG FIX: the original called ``round(x)`` unguarded inside the try, so
    "inf" raised an uncaught OverflowError (and "nan" was returned as a
    string). Whole-number detection now uses ``float.is_integer()``, which is
    safely False for inf/nan, so those inputs parse as floats.
    """
    try:
        x = float(text)
    except ValueError:
        return text
    return int(x) if x.is_integer() else x
def _WrapUnaryOp(op_fn, inner, ctx, item):
    """Evaluate *inner* on (ctx, item), then apply unary operator *op_fn*.

    :param op_fn: one-argument operator function
    :param inner: callable evaluated as ``inner(ctx, item)``
    :return: ``op_fn(inner(ctx, item))``
    """
    operand = inner(ctx, item)
    return op_fn(operand)
from typing import Any
def _convert_sql_format(value: Any) -> str:
    """
    Given a Python value, convert to string representation
    of the equivalent SQL datatype.
    :param value: A value, ie: a literal, a variable etc.
    :return: The string representation of the SQL equivalent.

    NOTE(review): values are interpolated verbatim — embedded double quotes
    are not escaped, so this is unsafe for untrusted input; prefer
    parameterized queries.

    >>> _convert_sql_format(1)
    '1'
    >>> _convert_sql_format("John Smith")
    '"John Smith"'
    """
    # BUG FIX (docs only): the first doctest previously showed "1" with
    # double quotes, which is not what the interpreter echoes ('1'), so the
    # doctest could never pass. Code is unchanged.
    if value is None:
        return "NULL"
    elif isinstance(value, str):
        return f'"{value}"'
    elif isinstance(value, bytes):
        # str(b'x') == "b'x'": strip the leading b' and the trailing quote.
        return '"' + str(value).replace("b'", "")[:-1] + '"'
    else:
        return str(value)
def _format_digest(digest, scheme, encoding):
    """Render ``{scheme}digest``, or ``{scheme.encoding}digest`` when an
    encoding is given (truthy)."""
    if encoding:
        return "{%s.%s}%s" % (scheme, encoding, digest)
    return "{%s}%s" % (scheme, digest)
def channel_squeeze(x,
                    groups):
    """
    Channel squeeze operation: sum each group of channels down to one channel.
    Parameters
    ----------
    x : Tensor
        Input of shape (batch, channels, height, width); ``channels`` must be
        divisible by ``groups``.
    groups : int
        Number of groups.
    Returns
    -------
    Tensor
        Tensor of shape (batch, channels // groups, height, width).
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    grouped = x.view(batch, per_group, groups, height, width)
    return grouped.sum(dim=2)
def _vector_to_matrix(Xv, k):
    """Reshape flat vector *Xv* into a matrix with *k* columns (rows inferred)."""
    return Xv.reshape((-1, k))
def complement(x):
    """Return the complement digit of single-character digit *x*: str(8 - int(x)).

    Helper for reverse(rule) and unstrobe(rule); input and output are both
    one-character strings.
    """
    digit = int(x)
    return str(8 - digit)
def get_yt_link_time(url) -> int:
    """Get the seconds from a youtube link's ``&t=<h>h<m>m<s>s`` (or plain
    ``&t=<seconds>``) parameter. Returns total seconds (0 if none found).

    The URL is scanned backwards from its end up to the first '=' seen; digit
    runs are accumulated into the unit selected by the trailing 'h'/'m'/'s'
    letter, and a bare trailing number counts as seconds. (This preserves the
    original algorithm, which assumes the time parameter is last in the URL.)

    BUG FIX: replaced the bare ``except:`` probes (with a dead ``checkint``
    variable) by explicit digit tests; behaviour is unchanged.
    """
    surl = str(url)
    hours = ""
    minutes = ""
    seconds = ""
    if "t=" in surl or "time_continue=" in surl or "start=" in surl:
        unit = None
        i = len(surl)
        letter = ""
        while i > 0 and letter != "=":
            i -= 1
            letter = surl[i]
            if letter in "0123456789":
                if unit == "s":
                    seconds = letter + seconds
                elif unit == "m":
                    minutes = letter + minutes
                elif unit == "h":
                    hours = letter + hours
                elif i == len(surl) - 1:
                    # the URL ends with a digit: no unit letters, so the
                    # whole trailing number is a plain seconds value
                    unit = "s"
                    seconds = letter
            elif letter == "s":
                unit = "s"
            elif letter == "m":
                unit = "m"
            elif letter == "h":
                unit = "h"
    total = 0
    if hours:
        total += int(hours) * 3600
    if minutes:
        total += int(minutes) * 60
    if seconds:
        total += int(seconds)
    return total
import subprocess
def findimports_to_dict(src_rep):
    """Run ``findimports`` on *src_rep* and parse its output.

    Parameters
    ----------
    src_rep: str | list
        repository path(s) to be parsed by findimports
    Returns
    -------
    dict:
        maps each reported file/module header to the set of top-level module
        names it imports (only the first dotted component is kept).
    """
    # BUG FIX: the docstring allows a plain string, but `['findimports'] +
    # src_rep` raises TypeError for a str; wrap single paths in a list.
    if isinstance(src_rep, str):
        src_rep = [src_rep]
    findimports = subprocess.run(
        ['findimports'] + src_rep, capture_output=True)
    output = {}
    current_key = ''
    for line in findimports.stdout.decode('utf-8').splitlines():
        stripped = line.strip()
        # omit empty lines
        if not stripped:
            continue
        # a line with no leading spaces is a file/module header ending in ':'
        if len(stripped) == len(line):
            current_key = stripped[:-1]
            output[current_key] = []
            continue
        # indented line -> an import; keep only the top-level module name,
        # e.g. skimage instead of skimage.filters
        output[current_key].append(stripped.split('.')[0])
    return {k: set(v) for k, v in output.items()}
def _read_text_file(path):
    """
    Read the text file at *path* and return all its lines joined by single
    spaces. Each line keeps its trailing newline, so consecutive lines are
    separated by "\\n " (legacy behaviour, preserved).
    """
    with open(path, 'rt') as fh:
        return " ".join(fh.readlines())
def get_model_shortcode(model_list):
    """
    Concatenate shortcodes for the given model names; names without a known
    shortcode contribute their uppercased first letter.
    """
    codes = {
        "tuned_localboosting": "tK",
        "tuned_cfsv2pp": "tC",
        "tuned_climpp": "tD",
        "perpp": "L",
        "multillr": "M",
        "tuned_salient2": "tS",
    }
    return "".join(codes[m] if m in codes else m[0].upper() for m in model_list)
def preamble_for_label(label):
    """
    Return the preamble for the documentation block for the given label.
    :param label: The label to use as the paragraph title.
    :return: The string that should be preamble of the new section.
    """
    indent = " " * 4
    parts = ["\n" + indent + "@par " + label + ":\n\n",
             indent + "@htmlonly\n\n"]
    return "".join(parts)
def linear_search(lis, value):
    """
    Sequential search: index of the first element equal to *value*, or None.
    Time complexity: O(n).
    """
    position = 0
    for item in lis:
        if item == value:
            return position
        position += 1
    return None
def isbn13_checksum (isbn_str):
    """
    Return the checksum over the coding (first 12 digits) of an ISBN-13.
    Digits at even positions weigh 1, odd positions weigh 3.
    :Parameters:
        isbn_str : string
            An ISBN-13 without the trailing checksum digit.
    :Returns:
        The checksum character, ``0`` to ``9``.
    For example:
        >>> isbn13_checksum ("978094001673")
        '6'
    """
    ## Preconditions & preparation:
    assert (len (isbn_str) == 12), 'expecting a 12-digit string'
    ## Main:
    weighted = sum(int(d) * (3 if pos % 2 else 1)
                   for pos, d in enumerate(isbn_str))
    ## Return:
    return str((10 - weighted % 10) % 10)
def decode(encoded, converter):
    """Decode by indexing *converter* with the int value of the first
    character of *encoded*."""
    index = int(encoded[0])
    return converter[index]
import re
def get_output_path(manifest, regex):
    """
    Find the single succeeded Pod node in an Argo workflow manifest whose
    ``status.nodes[*].name`` matches *regex* and return its 'out-zarr'
    output parameter.
    Parameters
    ----------
    manifest : dict
    regex : str
        regular expression used to select the node by its name.
    Returns
    ------
    dict:
        path : str or None, the intermediary output file path
        nodeId: the id of the matching manifest node
    Raises an Exception when no node matches or when more than one does.
    """
    out_zarr_path = None
    node_id = None
    matches = 0
    nodes = manifest['status']['nodes']
    for key in nodes:
        node = nodes[key]
        if node['type'] != 'Pod' or node['phase'] != 'Succeeded':
            continue
        if not re.search(regex, node['name']):
            continue
        matches += 1
        if matches > 1:
            raise Exception('I could not identify a unique node in the manifest for regex : ' + regex + '\n' +
                            '. Id of the first match : ' + node_id + '\n' + 'Id of second match : ' + node['id'])
        node_id = node['id']
        if 'outputs' in node and 'parameters' in node['outputs']:
            for param in node['outputs']['parameters']:
                if param['name'] == 'out-zarr':
                    out_zarr_path = param['value']
    if out_zarr_path is None and node_id is None:
        raise Exception('I could not identify any node in the manifest')
    return ({'path': out_zarr_path, 'nodeId': node_id})
def reverse(head):
    """Reverse a singly linked list in place; return the new head (None for
    an empty list)."""
    previous = None
    current = head
    while current:
        # detach the node and point it at the already-reversed prefix
        following = current.next
        current.next = previous
        previous = current
        current = following
    return previous
import logging
def setup_logger(log_path):
    """Create a DEBUG-level logger with a console handler and a file handler
    writing to *log_path*, and return it.

    The console handler only shows records at level > 20 (21 — presumably a
    custom "also print to console" level just above INFO; confirm), while the
    file handler records everything from INFO up.

    BUG FIX: every call used to append two more handlers to the same module
    logger, so repeated calls duplicated each log record; existing handlers
    are now removed first.
    """
    # Create a custom logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)  # numeric 10, as before
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    # Create console handler
    c_handler = logging.StreamHandler()
    c_handler.setLevel(21)
    # Create file handler
    f_handler = logging.FileHandler(log_path)
    f_handler.setLevel(logging.INFO)
    # Create formatters and add them to the handlers
    c_format = logging.Formatter('%(asctime)s - %(message)s', datefmt = '%Y-%m-%d %H:%M')
    f_format = logging.Formatter('%(asctime)s - %(message)s')
    c_handler.setFormatter(c_format)
    f_handler.setFormatter(f_format)
    # Add handlers to the logger
    logger.addHandler(c_handler)
    logger.addHandler(f_handler)
    return logger
def to_string(pairs):
    """Render (int, int) tuples as 'a:b' tokens joined by single spaces."""
    tokens = ["%i:%i" % pair for pair in pairs]
    return " ".join(tokens)
def get_members(module):
    """Return the values of all public (non-underscore) attributes of *module*."""
    public_names = [n for n in dir(module) if not n.startswith("_")]
    return [getattr(module, n) for n in public_names]
import os
def read_file(path, file_name=''):
    """Return the contents of path/file_name as one string, ignoring
    decoding errors."""
    full_path = os.path.join(path, file_name)
    with open(full_path, errors='ignore') as fh:
        return fh.read()
def leap(year: int):
    """Return True iff *year* is a Gregorian leap year.

    Rules: divisible by 4, except century years, which must also be divisible
    by 400 (1997 -> False, 2012 -> True, 1900 -> False, 2000 -> True).

    Improvement: the original four-deep if/else pyramid is collapsed into the
    equivalent single boolean expression; behaviour is identical.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def get_icon_info(family_name, icons, host, asset_url_pattern):
    """Return a list of (name, url) tuples for every icon that supports
    *family_name* (i.e. does not list it in 'unsupported_families')."""
    results = []
    for icon in icons:
        if family_name in icon['unsupported_families']:
            continue
        name = icon['name']
        url = 'http://' + host + asset_url_pattern.format(
            family=family_name,
            icon=name,
            version=icon['version'],
            asset='24px.xml',
        )
        results.append((name, url))
    return results
def get_epsg_srid(srs_name):
    """Parse a CRS name into (authority, srid).

    Accepted formats (WFS 1.1.0, see 9.2, page 36):
     * ``EPSG:<code>`` (authority:code)
     * ``http://www.opengis.net/def/crs/EPSG/0/<code>`` (URI Style 1)
     * ``http://www.opengis.net/gml/srs/epsg.xml#<code>`` (URI Style 2)
     * ``urn:ogc:def:crs:EPSG::<code>`` / ``urn:EPSG:geographicCRS:<code>`` (URN)

    :param srs_name: the coordinate reference system name
    :return: (authority, srid) — (None, None) for unrecognised input
    :rtype: tuple
    """
    authority = None
    srid = None
    parts = srs_name.split(':')
    if '/def/crs/' in srs_name:  # URI Style 1
        segments = srs_name.split('/')
        authority = segments[5].upper()
        srid = int(segments[-1])
    elif '#' in srs_name:  # URI Style 2
        pieces = srs_name.split('#')
        # authority is the file name before its extension, e.g. 'epsg.xml'
        authority = pieces[0].split('/')[-1].split('.')[0].upper()
        srid = int(pieces[-1])
    elif len(parts) > 2:  # URN style
        if len(parts) != 3:  # exactly 3 colon-fields is bogus -> (None, None)
            authority = parts[4].upper()
            # code is always the last value
            try:
                srid = int(parts[-1])
            except Exception:
                srid = parts[-1]
    elif len(parts) == 2:  # authority:code
        authority = parts[0].upper()
        try:
            srid = int(parts[1])
        except Exception:
            srid = parts[1]
    return authority, srid
def get_reg_lambd():
    """Return the default regularization strength (1e-5)."""
    return 1e-5
import re
def strip_item(s):
    """
    Remove all brackets, commas and double quotes from *s*, then trim
    whitespace and lowercase. Used to normalise name strings.
    """
    cleaned = re.sub(r'[()\[\],"]', '', s)
    return cleaned.strip().lower()
def team_year_key(*args):
    """
    Pack or unpack a 'team:year' key.
    Two arguments (team, year) -> the key string 'team:year'.
    One argument (a key)       -> the (team, year) pair, with year converted
    to int unless it is the literal 'all_time'.
    """
    if len(args) == 2:
        team, year = args
        return ':'.join((team, str(year)))
    if len(args) == 1:
        team, year = args[0].split(':')
        return team, (year if year == 'all_time' else int(year))
    raise ValueError("Don't know what to do with %i elements" % len(args))
def c2m(pos, scale=100):
    """Convert screen/pixel coordinates to map coordinates."""
    scaled = pos * 40.85 * 100 / 2048 / scale
    return scaled + 1
def build_label(vertex_id, agents):
    """Build the label for one vertex: its id followed by every agent's emoji."""
    emojis = [agent.emojify() for agent in agents]
    return str(vertex_id) + "".join(emojis)
import json
import logging
def get_num_classes(data_config_files):
    """
    Determine the number of model classes from data-set config files.
    Exits the process (SystemExit) when a config lacks 'numModelClasses';
    logs an error when a data label is outside the declared class range
    (labels are 0-indexed, so max label must be < numModelClasses); logs a
    warning and returns the largest count when the configs disagree.
    :param data_config_files: Array of data set config files
    """
    num_classes = []
    for config_file in data_config_files:
        with open(config_file) as fh:
            config = json.load(fh)
        if 'numModelClasses' not in config.keys():
            logging.error(f"Config file {config_file} does not specify the number of classes.")
            exit(-1)
        num_classes.append(config['numModelClasses'])
        if 'data' in config.keys() and num_classes[-1] <= max([data_set['label'] for data_set in config['data']]):
            logging.error(f"Config file {config_file} has a label beyond the number of classes specified.")
    if num_classes.count(num_classes[0]) != len(num_classes):
        logging.warning(f"Config files do NOT specify the same number of classes. The largest number of "
                        f"{max(num_classes)} will be used.")
    return max(num_classes)
def wrap_msg(msg):
    """
    Wrap a log message between lines of '=' marks (capped at 150 chars),
    making cluster load background logging distinguishable.
    Args:
        msg (str): The log message to wrap
    Returns:
        str: The wrapped log message
    """
    bar = "=" * min(len(msg), 150)
    return f"\n{bar}\n{msg}\n{bar}"
import requests
def exists(path):
    """
    Check whether data is available at the given URL: send a HEAD request and
    report whether the response status is 200 OK.
    """
    response = requests.head(path)
    return response.status_code == requests.codes.ok
def Tail(filename, n_lines):
    """Return the last several lines of a file.
    If the file does not exist (or cannot be read), an empty string is
    returned.
    Args:
      filename: Name of the file to read.
      n_lines: Number of lines to return.
    Returns:
      String containing the file data.
    """
    try:
        with open(filename, 'r') as fh:
            lines = fh.readlines()
    except IOError:
        return ''
    return ''.join(lines[-n_lines:])
def fits_idx(idx):
    """Map a power-of-two instrument flag to its instrument name.

    1->HIRES, 2->ESI, 4->UVES, 8->XX, 16->MIKEb, 32->MIKEr; any other value
    (including unhashable input) yields 'Unknown'.

    BUG FIX: the bare ``except:`` (which even swallowed KeyboardInterrupt)
    is narrowed to the exceptions a dict lookup can raise; the mapping is
    also written as a plain literal instead of being rebuilt via zip/lambda
    on every call.
    """
    instruments = {1: 'HIRES', 2: 'ESI', 4: 'UVES',
                   8: 'XX', 16: 'MIKEb', 32: 'MIKEr'}
    try:
        return instruments[idx]
    except (KeyError, TypeError):
        return 'Unknown'
def and_list(args: list) -> str:
    """Format a list of strings as an Oxford-comma 'and' list.
    Does not deal with commas embedded in arg strings.
    """
    joined = ', '.join(args)
    comma_count = joined.count(',')
    if comma_count == 1:
        joined = joined.replace(',', ' and')
    elif comma_count > 1:
        point = joined.rindex(',') + 1
        joined = joined[:point] + ' and' + joined[point:]
    return joined
def _fmt_fields(fld_vals, fld2fmt):
    """Optionally format specific field values.

    :param fld_vals: iterable of (field, value) pairs
    :param fld2fmt: map from field name to a format string, eg pval: '{:8.2e}'
    :return: list of values, formatted where the field has an entry
    """
    formatted = []
    for fld, val in fld_vals:
        if fld in fld2fmt:
            val = fld2fmt[fld].format(val)
        formatted.append(val)
    return formatted
def sec_to_time(sec: int) -> str:
    """Convert a duration in seconds to human-readable HH:MM:SS."""
    minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(minutes, 60)
    return f'{hours:02}:{minutes:02}:{seconds:02}'
from pathlib import Path
def f2i(f: Path) -> int:
    """Extract the trailing integer from a stem like 'block_2' (block_2.csv -> 2)."""
    _, _, tail = f.stem.rpartition('_')
    return int(tail)
import re
from typing import OrderedDict
def expose_extr(hrefs):
    """Extract unique immowelt expose ids from one search page's hrefs.

    Args:
        hrefs (iterable[str]): href attribute strings from the search page
    Returns:
        list: expose id strings, first-seen order, de-duplicated
    """
    # BUG FIX: the pattern was a non-raw string with invalid "\/" and "\w"
    # escape sequences (DeprecationWarning, SyntaxWarning on newer Pythons);
    # a raw string matches identically without the warnings.
    matches = (re.findall(r"/expose/(\w+)", href) for href in hrefs)
    ids = [m[0] for m in matches if m]
    # dict.fromkeys keeps first-seen order while dropping duplicates
    return list(OrderedDict.fromkeys(ids))
import yaml
def read_config():
    """Read ./config.yaml and return the parsed dict.
    On a YAML parse error the exception is printed and None is returned.
    :return: dict
    """
    try:
        with open('config.yaml', 'r') as stream:
            return yaml.load(stream, Loader=yaml.FullLoader)
    except yaml.YAMLError as exc:
        print(exc)
import pickle
def load_list(list):
    """Unpickle and return ../data/<list>.txt.

    NOTE(review): the parameter name shadows the builtin ``list``; kept for
    backward compatibility with keyword callers.
    """
    with open("../data/{}.txt".format(list), "rb") as fh:
        return pickle.load(fh)
import os
import argparse
def argparse_readable_folder(value):
    """argparse type: validate that *value* is an existing, readable folder
    and return its absolute path."""
    checks = (
        (os.path.exists, "{0} doesn't exist"),
        (os.path.isdir, "{0} exists but isn't a folder"),
        (lambda p: os.access(p, os.R_OK),
         "{0} exists and is a folder but isn't readable"),
    )
    for predicate, message in checks:
        if not predicate(value):
            raise argparse.ArgumentTypeError(message.format(value))
    return os.path.abspath(value)
import sympy
def symbol(name, real=True):
    """
    Create symbolic variable(s).
    :param name: symbol name(s), e.g. 'theta', 'theta psi', 'q_:6'
    :type name: str
    :param real: assume the variables are real, defaults to True
    :type real: bool, optional
    :return: SymPy symbol or tuple of symbols
    :rtype: sympy
    Underscores produce subscripts when pretty printed (as in LaTeX), and
    Greek-letter names render as Greek letters in Jupyter.
    :seealso: :func:`sympy.symbols`
    """
    return sympy.symbols(name, real=real)
def bucketed_list(l, bucket_size):
    """Split *l* into consecutive chunks of at most *bucket_size* items.
    Arguments:
      l: A list of items
      bucket_size: The size of buckets to create (values below 1 act as 1).
    Returns:
      A list of lists, each containing a consecutive subset of the input.
    """
    step = max(1, bucket_size)
    return [l[start:start + step] for start in range(0, len(l), step)]
import site
import sys
import subprocess
def install_wheel(whl):
    """Install a wheel file with pip, retrying with --user on failure.

    :param whl: path to the .whl file
    :return: pip's exit code (0 on success)
    """
    # Invoke pip through the current interpreter so the wheel lands in the
    # same environment that is running this script.
    whl_args = [
        sys.executable,
        '-m',
        'pip',
        'install',
        '--ignore-installed',
    ]
    # First attempt: regular (site-wide / virtualenv) install.
    rc = subprocess.Popen(whl_args + [whl]).wait()
    if rc != 0:
        try:
            # Fall back to a per-user install, e.g. when site-packages is
            # not writable by the current user.
            if hasattr(site, 'getusersitepackages'):
                site_packages = site.getusersitepackages()
                print("Installing to user site packages...", site_packages)
                rc = subprocess.Popen(whl_args + ["--user"] + [whl]).wait()
        except ImportError:
            # NOTE(review): nothing inside this try block performs an import,
            # so this handler looks unreachable — presumably legacy; confirm.
            pass
    return rc | 25ac2238538118dd39b8590a6c76e44d934e8a35 | 45,096 |
def complex_sort(numbers):
    """Sort numbers by absolute value, ascending.

    BUG FIX (docs only): the previous docstring was placeholder junk
    ("This is the description ... Loves it3", "printed array"); the code is
    unchanged.

    Parameters
    ----------
    numbers : array
        sequence of numbers to sort
    Returns
    -------
    array
        new list sorted by abs(value); ties keep their input order
    """
    return sorted(numbers, key=abs)
from datetime import datetime
def getInfo(filename: str):
    """Extract the variable name and timestamp from a wrfout GeoTIFF filename.

    E.g. '../geotiff/10V_GDS0_SFC/10V_GDS0_SFC_1991-07-18Z22:00.tiff'
    -> ('10V', datetime(1991, 7, 18, 22, 0)).

    (Docstring translated from Spanish. Cleanup: the original unpacked two
    different fields into the same throw-away variable ``temp1``.)
    """
    basename = filename.split('/')[-1]
    var, remainder = basename.split('_', 1)
    # drop the two parametrization tokens (e.g. 'GDS0', 'SFC')
    _, _, timestamp = remainder.split('_', 2)
    timestamp, _extension = timestamp.split('.', 1)
    date = datetime.strptime(timestamp, "%Y-%m-%dZ%H:%M")
    return var, date
def assert_indexed(func):
    """
    Decorator to ensure the surface has been indexed (``self.indexed`` is not
    None) before *func* runs; otherwise raises RuntimeError naming the method.
    """
    def asserted(self, *args, **kwargs):
        if self.indexed is None:
            # BUG FIX: the original passed two arguments to RuntimeError,
            # which renders as a tuple ('Surface must be indexed before
            # calling ', 'name') instead of a single readable message.
            raise RuntimeError("Surface must be indexed before calling "
                               + func.__name__)
        return func(self, *args, **kwargs)
    return asserted
def get_credit_card_full_po_lines_from_date(alma_api_client, date):
    """Fetch full PO line records for credit-card purchases (acquisition
    method EXCHANGE) whose created_date equals '<date>Z'."""
    full_po_lines = []
    for brief in alma_api_client.get_brief_po_lines("EXCHANGE"):
        if brief.get("created_date") != f"{date}Z" or brief.get("number") is None:
            continue
        full_po_lines.append(alma_api_client.get_full_po_line(brief.get("number")))
    return full_po_lines
def pkcs_1_5(b: bytes, size: int) -> int:
    """
    PKCS#1 v1.5 padding.
    Create a block of the form:
        00 || BT || PS || 00 || b
    where BT is 0x02 and PS is a run of 0xff bytes filling the block.

    BUG FIX (docs only): the old docstring said "BT is usually 0x01" — this
    code emits 0x02, the encryption block type; 0x01 is the signature type.
    NOTE(review): for real type-02 (encryption) padding, RFC 8017 requires PS
    to be random NONZERO bytes, not constant 0xff — confirm intended use.

    :param b: payload buffer; requires len(b) < size - 3
    :param size: total block size in bytes
    :return: the padded block as a big-endian int
    """
    assert len(b) < size - 3
    padded = bytearray((0x00, 0x02))
    padded += bytearray(0xff for _ in range(size - 3 - len(b)))
    padded += bytearray((0x00,))
    padded += b
    return int.from_bytes(padded, byteorder="big")
from typing import List
def spatial_to_serial_order(hole_sequence: List[int],
                            holes: List[int]) -> List[int]:
    """
    Convert spatial hole numbers into their 1-based serial positions within
    the presented sequence.
    Args:
        hole_sequence: ordered spatial hole numbers presented in the first
            phase of the task, e.g. [3, 1, 4].
        holes: spatial hole numbers being asked about, e.g. [4, 3].
    Returns:
        list of serial order positions (for the example above: [3, 1]).
    """
    positions = []
    for hole in holes:
        positions.append(hole_sequence.index(hole) + 1)
    return positions
def _find_formatter(formatters):
    """Build a dispatching formatter.

    Args:
        formatters: map from type to formatter callable
    Returns:
        function: x -> formatted output via the first type-matching entry,
        or x unchanged when no type matches
    """
    def formatter(value):
        for kind, fmt in formatters.items():
            if isinstance(value, kind):
                return fmt(value)
        return value
    return formatter
def wrap_frame_index(t_index, T):
    """Wrap each frame index into [0, T): indices below 0 get +T and indices
    >= T get -T (single wrap only, matching the original behaviour)."""
    wrapped = []
    for t in t_index:
        if t < 0:
            wrapped.append(t + T)
        elif t >= T:
            wrapped.append(t - T)
        else:
            wrapped.append(t)
    return wrapped
from subprocess import Popen, PIPE
def last_revision():
    """Get the svn revision number via ``svnversion -n .``.
    Returns
    -------
    :class:`str`
        The latest svn revision number; '0' indicates an unversioned
        directory or some other failure.
    Notes
    -----
    Assumes you're running ``python setup.py version`` inside an svn
    checkout directory.
    """
    proc = Popen(['svnversion', '-n', '.'],
                 universal_newlines=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    # svn 1.7.x says 'Unversioned', svn < 1.7 says 'exported'.
    if out.startswith(('Unversioned', 'exported')):
        return '0'
    # For a mixed-revision range 'A:B', keep B; strip M/S/P modifiers.
    rev = out.split(':')[1] if ':' in out else out
    for modifier in 'MSP':
        rev = rev.replace(modifier, '')
    return rev
import base64
def _base64encode(obj):
    """Encode string *obj* as UTF-8 and return its base64 form.
    :returns: Bytes
    """
    raw = str.encode(obj)
    return base64.b64encode(raw)
def _f2s(number, dec=4):
    """
    Return a string representation of ``number`` with at most ``dec`` decimal
    places and no trailing decimal zeros; non-numeric inputs are returned
    unchanged.
    """
    if not isinstance(number, (int, float)):
        return number
    text = '{:.{prec}f}'.format(number, prec=dec)
    return text.rstrip('0').rstrip('.')
def colorGlobalRule(scModel, edgeTuples):
    """
    Report whether any edge's operation belongs to the Color category,
    directly or via its grouped categories.
    :param scModel: model exposing getGroupOperationLoader()
    :param edgeTuples: edge tuples whose .edge dict carries the 'op' name
    :return: 'yes' or 'no'
    """
    for edge_tuple in edgeTuples:
        op = scModel.getGroupOperationLoader().getOperationWithGroups(
            edge_tuple.edge['op'], fake=True)
        if op.category == 'Color':
            return 'yes'
        if op.groupedCategories is not None and 'Color' in op.groupedCategories:
            return 'yes'
    return 'no'
def generate_script(group, entry_point, header, template):
    """Generate a script from the template.
    :param str group:
        The entry-point group name, e.g., "console_scripts".
    :param entry_point:
        Object exposing ``.module_name`` and ``.attrs`` (1 or 2 attrs).
    :param str header:
        The first line of the script, e.g., "#!/usr/bin/env python".
    :param str template:
        The script template with %(...)s placeholders.
    :returns: The templated script content
    :rtype: str
    :raises ValueError: when attrs is empty or has more than two elements.
    """
    attrs = entry_point.attrs
    if not attrs or len(attrs) > 2:
        raise ValueError("Script targets must be of the form "
                         "'func' or 'Class.class_method'.")
    substitutions = {
        'group': group,
        'module_name': entry_point.module_name,
        'import_target': attrs[0],
        'invoke_target': '.'.join(attrs),
    }
    return header + template % substitutions
def determine_band_channel(kal_out):
    """Return (band, channel, target_frequency) parsed from kal output bytes.

    Every line is scanned; the last line of the form
    'Using <band> channel <ch> (<freq>)' wins (matching the original
    scan-to-end behaviour). Returns (None, None, None) when nothing matches.

    BUG FIX: the original wrapped the scan in ``while band is None:``, which
    re-scanned the same output forever when no line matched (infinite loop).
    """
    band = None
    channel = None
    tgt_freq = None
    for raw_line in kal_out.splitlines():
        line = raw_line.decode("utf-8")
        if "Using " in line and " channel " in line:
            fields = line.split()
            band = fields[1]
            channel = fields[3]
            tgt_freq = fields[4].replace("(", "").replace(")", "")
    return (band, channel, tgt_freq)
import pkg_resources
def example_label_file(phone_level=False):
    """Get the path of the bundled example HTS-style full-context label file
    (utterance arctic_a0009; its audio is available via example_audio_file).

    Args:
        phone_level: If True, return the phone-level alignment, otherwise the
            state-level alignment.
    Returns:
        str: Path of the example label file.
    See also:
        :func:`example_audio_file`
    Examples:
        >>> from nnmnkwii.util import example_label_file
        >>> from nnmnkwii.io import hts
        >>> labels = hts.load(example_label_file())
    """
    alignment = "phone" if phone_level else "state"
    resource = '_example_data/arctic_a0009_{}.lab'.format(alignment)
    return pkg_resources.resource_filename(__name__, resource)
import random
import string
def random_string(length: int = 8) -> str:
    """
    Return a random string of ASCII letters of the given length.
    :param length: the length
    :return: the random string
    """
    letters = string.ascii_letters
    return "".join(random.choice(letters) for _ in range(length))
def f_rise(t_half):
    """
    Flare rise-phase polynomial in t_half (Davenport+ 2014 Eqn. 1).
    """
    t = t_half
    return (1 + 1.941 * t - 0.175 * t**2 - 2.246 * t**3 - 1.125 * t**4)
import string
import random
def tamper(payload, **kwargs):
    """
    Replace every '<here>' in *payload* with the same 3 random ASCII letters —
    helpful for generating unique entries such as user names.
    Tested against:
        * Microsoft SQL Server 2005
        * MySQL 4, 5.0 and 5.5
        * Oracle 10g
        * PostgreSQL 8.3, 8.4, 9.0
    Notes:
        * Useful to bypass very weak and bespoke web application firewalls
          that have poorly written permissive regular expressions.

    BUG FIX (docs only): the old doctest claimed ``tamper('INSERT')`` yields
    ``'insert'`` — the function never lower-cases anything; it only
    substitutes '<here>'. The code is unchanged.

    >>> tamper('INSERT')
    'INSERT'
    """
    retVal = payload
    if payload:
        random_chars = ''.join(
            random.choice(string.ascii_letters) for _ in range(3))
        retVal = retVal.replace("<here>", random_chars)
    return retVal
import json
def get_metadata(bf):
    """Return the JSON sidecar metadata of a BIDSFile.
    Parameters
    ----------
    bf : BIDSFile object
    Returns
    -------
    Dictionary parsed from '<path-without-extension>.json'
    """
    extension = bf.get_entities()['extension']
    base = bf.path.replace('.' + extension, '')
    with open(base + '.json', 'r') as handle:
        return json.load(handle)
import random
def random_message(length):
    """ Generates a block of random bytes of a user-specified length. """
    # Draw one uniform byte value per position, in order.
    payload = bytearray(random.randint(0, 255) for _ in range(length))
    return bytes(payload)
import math
def df2idf(docfreq, totaldocs, log_base=2.0, add=0.0):
    """
    Compute default inverse-document-frequency for a term with document
    frequency `docfreq`::

        idf = add + log(totaldocs / docfreq)

    :param docfreq: number of documents the term appears in
    :param totaldocs: total number of documents in the corpus
    :param log_base: base of the logarithm
    :param add: constant offset added to the idf
    """
    # 1.0 * forces true (float) division even on integer inputs.
    ratio = 1.0 * totaldocs / docfreq
    return add + math.log(ratio, log_base)
def gen_corpora(text_files):
    """Build a text corpus from a collection of dictionaries (or any text files).

    Parameters
    ----------
    text_files : iterable of str
        Paths of the text files to read.

    Returns
    -------
    dict
        Maps each file path to that file's full text content.
    """
    dict_corpora = {}
    for file in text_files:
        # "with" guarantees the handle is closed even if reading fails
        # (the original leaked the handle on a read error).
        with open(file, "r") as f:
            dict_corpora[file] = f.read()
    return dict_corpora
import re
def get_point_cloud_range(filename):
    """Read the point-cloud range from a point-cloud config file.

    The range is expected on line 9 of the file, formatted as a bracketed,
    comma-separated list, e.g. ``[0, -40, -3, 70.4, 40, 1]``.

    Args:
        filename: path of the point-cloud config file.

    Returns:
        Tuple ``(min_x, min_y, min_z, max_x, max_y, max_z)`` of floats.
    """
    # "with" closes the file even if parsing below raises
    # (the original never closed the handle).
    with open(filename, 'r') as f:
        lines = f.readlines()
    point_range_line = lines[8]
    datapat = re.compile(r'\[(.*)\]')
    match = datapat.findall(point_range_line)
    min_x, min_y, min_z, max_x, max_y, max_z = [
        float(i.strip()) for i in match[0].split(',')
    ]
    return min_x, min_y, min_z, max_x, max_y, max_z
def build_class(names):
    """Format the ZLabels Apex test-class file.

    Each entry of *names* (e.g. 'Label.AColleague') becomes one element of
    a ``List<String>`` literal inside an ``@isTest`` class body; the last
    element carries no trailing comma.
    """
    prefix = '@isTest\nprivate class ZLabels {\n private static List<String> labels = new List<String> {\n'
    suffix = ' };\n}\n'
    body = ''.join(' ' + label + ',' + '\n' for label in names)
    # Drop the trailing ",\n" after the last label, then restore the newline.
    body = body[:-2] + '\n'
    return prefix + body + suffix
def do_something_and_return_a_value(func):
    """
    DECORATOR 4: pass-through decorator that calls *func* and returns its value.

    ``functools.wraps`` preserves the wrapped function's name, docstring and
    other metadata (the original wrapper clobbered them).
    """
    from functools import wraps  # local import keeps the snippet self-contained

    @wraps(func)
    def wrapper_do_something_and_return_a_value(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper_do_something_and_return_a_value
def sqlite_db_tables(c=None):
    """
    List the tables of a sqlite database.

    How do I list all tables/indices contained in an SQLite database :
    https://www.sqlite.org/faq.html#q7

    :param c: cursor whose rows support ``row['name']`` access
        (e.g. with ``sqlite3.Row`` as row factory); may be None.
    :return: list of user table names (internal sqlite_* tables excluded);
        empty list when no cursor is given.
    """
    if c is None:
        return []
    c.execute('select name from sqlite_master where type = "table" and name NOT LIKE "%sqlite_%"')
    return [row['name'] for row in c.fetchall()]
def score_tomita(trial_strs, tomita_num):
    """
    Calculate the fraction of trial strings accepted by a Tomita grammar.

    Args:
        trial_strs: List of strings to test for grammar membership.
        tomita_num: Suffix of the ``tomita_<n>`` predicate, looked up in
            module globals, used to test each string.

    Returns:
        Fraction of the ORIGINAL list that is accepted; strings with
        malformed begin/end markers are dropped and so count against
        the score.
    """
    num_strs = len(trial_strs)
    tomita_fun = globals()[f"tomita_{tomita_num}"]
    # Strip begin ('^') / end ('$') of-sequence tokens when any string
    # carries them; strings with malformed markers are discarded.
    if any('^' in s or '$' in s for s in trial_strs):
        trial_strs = [s[1:-1] for s in trial_strs
                      if s[0] == '^' and s[-1] == '$']
    accepted = sum(1 for s in trial_strs if tomita_fun(s))
    return accepted / num_strs
import os
import sys
def path_to_module_name(path: str) -> str:
    """Converts a filepath to a dotted module name.

    Args:
        path (str): The path of the file.

    Returns:
        str: the name of the module.
    """
    module_path = os.path.abspath(os.path.splitext(path)[0])
    # Consider only sys.path entries that are real directories, in sorted
    # order; every prefix that contains the path shortens it in turn.
    for sys_path in sorted(p for p in sys.path if os.path.isdir(p)):
        if module_path.startswith(sys_path):
            module_path = os.path.relpath(module_path, sys_path)
    return module_path.strip(os.sep).replace(os.sep, ".")
def get_c_air(T: float) -> float:
    """Specific heat of air at constant pressure.

    Args:
        T: air temperature (unused; the value is treated as constant here).

    Returns:
        Specific heat of air at constant pressure, J/(kg*K).
    """
    C_P_AIR = 1006.0  # treated as temperature-independent
    return C_P_AIR
def read_file(fname) -> list[list[int]]:
    """Parse a cuboid instruction file.

    Each line looks like ``on x=10..12,y=10..12,z=10..12`` and is returned
    as ``[is_on, x1, x2, y1, y2, z1, z2]`` with ``is_on`` being 1 or 0.
    """
    parsed = []
    with open(fname) as f:
        for line in f:
            pieces = line.split(" ")
            assert len(pieces) == 2
            state, coords = pieces
            assert state in ("on", "off")
            row = [1 if state == "on" else 0]
            # coords looks like 'x=10..12,y=10..12,z=10..12'
            for part in coords.split(","):
                assert part[1] == "="
                lo, hi = part[2:].split("..")  # int() tolerates the trailing '\n'
                row.append(int(lo))
                row.append(int(hi))
            parsed.append(row)
    return parsed
def add_incomplete_stamp(mpl_axis, xloc, yloc):
    """Stamp a semi-transparent, rotated red 'incomplete' watermark on an axis.

    Args:
        mpl_axis: matplotlib axes to annotate (modified in place).
        xloc, yloc: text position, in axes coordinates.

    Returns:
        The same axes object, for chaining.
    """
    font = {'weight': 'bold', 'size': 16}
    mpl_axis.text(x=xloc, y=yloc, s='incomplete', color='red',
                  rotation=-45, alpha=0.5, fontdict=font,
                  transform=mpl_axis.transAxes, zorder=3)
    return mpl_axis
def nearest(house_numbers, n):
    """
    Print the distance to the nearest zero.
    Output the numbers on one line, separated by spaces.

    NOTE(review): despite the docstring, this RETURNS the distance list and
    prints the full working array three times (looks like leftover debug
    output) -- confirm the intended stdout format before relying on it.
    """
    # Sentinel 0 in front and a huge value (10**9) behind, so indices 1..n
    # of `distance` map onto the original house_numbers values.
    distance = [0] + [*house_numbers] + [int(10**9)]
    print(distance)
    # Backward sweep: distance to the nearest zero to the right.
    # NOTE(review): the ranges (n-2..0 here and 2..n-1 below) only line up
    # with the sentinel layout if n counts the padded list -- verify
    # against callers.
    for i in range(n-2, -1, -1):
        if distance[i] != 0:
            distance[i] = distance[i+1] + 1
    print(distance)
    # Forward sweep: take the minimum with the distance from the left.
    for i in range(2, n):
        if distance[i] != 0:
            distance[i] = min(distance[i-1] + 1, distance[i])
    print(distance)
    # Strip the two sentinels before returning.
    return distance[1:-1]
def calc_pokemon_moveset_tdo(atk_a, def_a, hp_a, fast_ppt_a, fast_ept_a, charge_ppe_a,
                             atk_b, def_b, fast_ppt_b, fast_ept_b, charge_ppe_b,
                             fast_mult_a=1, charge_mult_a=1, fast_mult_b=1, charge_mult_b=1):
    """Calculate a Pokémon's total damage output (TDO) against a defender.

    Numerator: attacker A's damage rate (fast-move power/turn plus
    charge-move power from fast-move energy) weighted by A's bulk
    (def * hp).  Denominator: defender B's mirror-image damage rate
    weighted by B's attack and defense.

    Bug fix: the defender's charge-move term previously multiplied by
    ``fast_mult_b`` instead of ``charge_mult_b``, breaking the symmetry
    with the attacker's term.
    """
    rate_a = fast_ppt_a * fast_mult_a + fast_ept_a * charge_ppe_a * charge_mult_a
    rate_b = fast_ppt_b * fast_mult_b + fast_ept_b * charge_ppe_b * charge_mult_b
    return (rate_a * atk_a * def_a * hp_a) / (rate_b * atk_b * def_b)
def parse_x12_major_version(x12_implementation_version) -> str:
    """
    Parses the x12 major version from an implementation version string.
    If the version is None or shorter than 6 characters, an empty string
    is returned.

    Example:
        parse_x12_major_version("005010X279A1")  # -> "5010"
        parse_x12_major_version("00501")         # -> ""

    :param x12_implementation_version: The X12 implementation version typically conveyed in ST03
    :returns: The x12 major version or an empty string
    """
    version = x12_implementation_version
    if version is not None and len(version) >= 6:
        return version[2:6]
    return ""
import re
def generateTestKey(test_name):
    """
    Generate a test 'key' for a given test name.

    The name is stripped, lowercased, space-stripped and reduced to
    alphanumerics, so the key is safe for dict lookup in a template.
    Tests must be named such that they will have unique keys.
    """
    lowered = test_name.strip().lower().replace(" ", "")
    # Drop every character that could not appear in a variable name.
    return re.sub(r'[^a-zA-Z0-9]', '', lowered)
from typing import OrderedDict
def to_sample_names(path):
    """
    Infer the sample names (and thus the sample file name prefix) for all
    metrics from the metrics file produced by fgbio's DemuxFastqs.

    The file is tab-separated with a header row; rows whose barcode name
    is "unmatched" are skipped.  Each name has the form
    ``<barcode_name>-<library_name>-<barcode>``.
    """
    names = []
    with open(path, "r") as fh:
        rows = (line.rstrip("\r\n").split("\t") for line in fh)
        columns = [col.lower() for col in next(rows)]
        for values in rows:
            record = dict(zip(columns, values))
            name = "{}-{}-{}".format(record["barcode_name"],
                                     record["library_name"],
                                     record["barcode"])
            if record["barcode_name"] != "unmatched":
                names.append(name)
    return names
def deltawords(num, arg):
    """An adverb to come after the word 'improved' or 'slipped'"""
    delta = abs(num - arg)
    # We only pick out changes over 10%; over 30% in 9 months is unheard of.
    if delta == 0:
        return "not at all"
    for threshold, word in ((10, "slightly"), (20, "moderately"), (30, "considerably")):
        if delta < threshold:
            return word
    return "massively"
def is_alt(chrom):
    """
    Check whether a chromosome name denotes an ALT contig,
    i.e. carries the "_alt" suffix.
    """
    # Slice comparison is equivalent to endswith for a 4-char suffix.
    return chrom[-4:] == "_alt"
def sec_query_allow(mech, query):
    """
    Quick default to allow all feature combinations which could
    negatively affect security.

    :param mech: The chosen SASL mechanism
    :param query: An encoding of the combination of enabled and
        disabled features which may affect security.
    :returns: ``True`` unconditionally (permissive default policy).
    """
    allow = True
    return allow
import pickle
def load_model():
    """Load the trained model from disk.

    Returns:
        The unpickled model object stored at ``model/finalized_model.sav``.

    NOTE: ``pickle.load`` can execute arbitrary code from the file -- only
    load model files from a trusted source.
    """
    filename = 'model/finalized_model.sav'
    # "with" ensures the handle is closed after unpickling
    # (the original left it open).
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
def inConvergenceCorridor(d_struct, d_gc, BS_d_struct, BS_d_gc):
    """
    Check if a solution's qualities are within the convergence corridor.

    :param d_struct: structural distance of the candidate solution
    :param d_gc: GC distance of the candidate solution
    :param BS_d_struct: best-solution structural distance (sets the corridor)
    :param BS_d_gc: best-solution GC distance (sets the corridor)
    :return: True when both distances fall inside their corridor bounds.

    NOTE(review): the gc bound evaluates as ``BS_d_gc + 1/100*5 + BS_d_gc + 1``;
    if ``(BS_d_gc + 1) / 100 * 5`` was intended, parentheses are missing.
    Preserved as-is here -- confirm against the original author's intent.
    """
    struct_limit = ((BS_d_struct / float(4)) + 3) * 4
    gc_limit = (BS_d_gc + 1 / float(100) * 5) + BS_d_gc + 1
    return d_struct <= struct_limit and d_gc <= gc_limit
import os
def extract_file_name(file_path, extract_file_extension):
    """Takes a file route and returns the name with or without its extension.
    This function is OS independent.
    INPUT:
        file_path: string.
        extract_file_extension: boolean -- keep the extension when True.
    OUTPUT:
        string representing the file name.
    EXAMPLES:
        'bar.txt', './bar.txt' or './foo/bar.txt' will all return 'bar.txt'
        with extract_file_extension True, otherwise 'bar'.
    """
    file_name_with_extension = os.path.split(file_path)[-1]
    if extract_file_extension:
        return file_name_with_extension
    # os.path.splitext correctly handles names without any dot (the
    # original rfind-based slice chopped the last character off
    # extension-less names) and does not treat a hidden file's leading
    # dot as an extension separator.
    return os.path.splitext(file_name_with_extension)[0]
def find_root(cfg):
    """
    Find a root node index for the given cfg: the first node whose id is
    never the target of any adjacency edge.  Falls back to index 0 when
    every node has an incoming edge.
    """
    # Collect every node id that appears as an edge target.
    targets = {edge['id'] for adj in cfg['adjacency'] for edge in adj}
    for index, node in enumerate(cfg['nodes']):
        if node['id'] not in targets:
            return index
    return 0
def process_arg(arg):
    """Processes the argument string.

    Args:
        arg: String to process as a runtime command line argument.

    Returns:
        True if the argument was recognised and accepted.  No arguments
        are recognised yet, so this stub always returns False.
    """
    recognised = False
    return recognised
def basis(u, cumul_var, p=0.5):
    """Return the minimum number of basis vectors
    from matrix U such that they account for at least p percent
    of total variance.

    Args:
        u: (M, M) numpy array containing principal components.
            For example, i'th vector is u[:, i]
        cumul_var: (N, ) numpy array, cumulative variance along the
            principal components.
        p: fraction of total variance the selected components must cover.

    Returns:
        v: (M, D) numpy array containing the first D principal components.
    """
    total = cumul_var[-1]
    p = max(p, 1e-8)  # guarantee at least one component is selected
    count = 0
    covered = 0
    while covered / total < p and count < len(cumul_var):
        covered = cumul_var[count]
        count += 1
    return u[:, :count]
def get_average(pixels):
    """
    Given a non-empty list of pixels, finds the average red, blue, and green values.

    Input:
        pixels (List[Pixel]): list of pixels to be averaged
    Returns:
        rgb (List[int]): list of average red, green, blue values across
        pixels respectively, truncated to int, in the order
        [red, green, blue]
    """
    count = len(pixels)
    # Sum each channel across all pixels, then truncate the mean to int
    # (matching the original int() truncation behavior).
    avg_red = sum(p.red for p in pixels) / count
    avg_green = sum(p.green for p in pixels) / count
    avg_blue = sum(p.blue for p in pixels) / count
    return [int(avg_red), int(avg_green), int(avg_blue)]
import sys
def get_writer(filename):
    """Get a suitable writer given a filename: stdout when filename is
    None, otherwise a freshly opened text file (caller must close it)."""
    if filename is None:
        return sys.stdout
    return open(filename, "w")
from typing import Iterable
def params_to_kwargs(params_names: Iterable, params: Iterable) -> dict:
    """Merge parallel iterables of names and values into a dict."""
    return {name: value for name, value in zip(params_names, params)}
import re
def routeup_from_config(config_file_obj):
    """Extract any preexisting route-up directive from an OpenVPN config.

    The LAST matching "route-up" line wins.  Because openvpn's config
    parser requires script directives to take a single argument, the
    executable may be quoted together with its arguments; one layer of
    surrounding double quotes is stripped.

    Returns None when no (non-empty) directive is present; raises
    ValueError on an opening quote without a closing one.
    """
    directive = None
    for line in config_file_obj:
        found = re.match(r'\s*route-up (.*)', line)
        if found:
            directive = found.group(1).strip()
    if not directive:
        return None
    if not directive.startswith('"'):
        return directive
    # this does not check if the final quote is escaped, but it's fine,
    # we can be a little more permissive than openvpn itself
    if directive.endswith('"'):
        return directive[1:-1]
    raise ValueError('Bad route-up directive', directive)
def result(target, is_like):
    """
    Return a (Spanish) message stating which OS family the given server
    appears to be, based on the fingerprint label in *is_like*.
    """
    if is_like in ("unix64", "unix254"):
        return "El sitio {} es un SO *nix".format(target)
    return "El sitio {} es un SO Windows".format(target)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.