id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
242,400
|
EnigmaBridge/client.py
|
ebclient/eb_create_uo.py
|
CreateUO.build_imported_object
|
def build_imported_object(configuration, tpl_import_req, import_resp):
    """
    Construct a usable UO from the result of an EB CreateUserObject call.

    The returned UO may be passed to ProcessData.

    :raises InvalidResponse: when the import response is missing or malformed
    """
    # Validate the response shape before touching its fields.
    response = None if import_resp is None else import_resp.response
    if response is None \
            or 'result' not in response \
            or 'handle' not in response['result']:
        logger.info('Invalid result: %s', import_resp)
        raise InvalidResponse('Invalid import result')

    # Handle layout, e.g. TEST_API00000022480000300004:
    #   <api_key> | 10-char uo_id field | 10-char uo_type field
    # The first two characters of each 10-char field are skipped.
    handle = response['result']['handle']
    hlen = len(handle)
    api_key = handle[:hlen - 20]
    uo_id = bytes_to_long(from_hex(handle[hlen - 18:hlen - 10]))
    uo_type = bytes_to_long(from_hex(handle[hlen - 8:]))

    uo = UO(uo_id=uo_id,
            uo_type=uo_type,
            enc_key=tpl_import_req.keys[KeyTypes.COMM_ENC].key,
            mac_key=tpl_import_req.keys[KeyTypes.COMM_MAC].key)
    uo.configuration = configuration
    # Store API key only if it differs from slot.
    if configuration is not None and configuration.api_key != api_key:
        uo.api_key = api_key
    return uo
|
python
|
def build_imported_object(configuration, tpl_import_req, import_resp):
    """
    Construct a usable UO from the result of an EB CreateUserObject call.

    The returned UO may be passed to ProcessData.

    :raises InvalidResponse: when the import response is missing or malformed
    """
    # Validate the response shape before touching its fields.
    response = None if import_resp is None else import_resp.response
    if response is None \
            or 'result' not in response \
            or 'handle' not in response['result']:
        logger.info('Invalid result: %s', import_resp)
        raise InvalidResponse('Invalid import result')

    # Handle layout, e.g. TEST_API00000022480000300004:
    #   <api_key> | 10-char uo_id field | 10-char uo_type field
    # The first two characters of each 10-char field are skipped.
    handle = response['result']['handle']
    hlen = len(handle)
    api_key = handle[:hlen - 20]
    uo_id = bytes_to_long(from_hex(handle[hlen - 18:hlen - 10]))
    uo_type = bytes_to_long(from_hex(handle[hlen - 8:]))

    uo = UO(uo_id=uo_id,
            uo_type=uo_type,
            enc_key=tpl_import_req.keys[KeyTypes.COMM_ENC].key,
            mac_key=tpl_import_req.keys[KeyTypes.COMM_MAC].key)
    uo.configuration = configuration
    # Store API key only if it differs from slot.
    if configuration is not None and configuration.api_key != api_key:
        uo.api_key = api_key
    return uo
|
[
"def",
"build_imported_object",
"(",
"configuration",
",",
"tpl_import_req",
",",
"import_resp",
")",
":",
"if",
"import_resp",
"is",
"None",
"or",
"import_resp",
".",
"response",
"is",
"None",
"or",
"'result'",
"not",
"in",
"import_resp",
".",
"response",
"or",
"'handle'",
"not",
"in",
"import_resp",
".",
"response",
"[",
"'result'",
"]",
":",
"logger",
".",
"info",
"(",
"'Invalid result: %s'",
",",
"import_resp",
")",
"raise",
"InvalidResponse",
"(",
"'Invalid import result'",
")",
"# TEST_API00000022480000300004",
"handle",
"=",
"import_resp",
".",
"response",
"[",
"'result'",
"]",
"[",
"'handle'",
"]",
"handle_len",
"=",
"len",
"(",
"handle",
")",
"api_key",
"=",
"handle",
"[",
"0",
":",
"handle_len",
"-",
"10",
"-",
"10",
"]",
"uo_id_str",
"=",
"handle",
"[",
"handle_len",
"-",
"10",
"-",
"10",
"+",
"2",
":",
"handle_len",
"-",
"10",
"]",
"uo_type_str",
"=",
"handle",
"[",
"handle_len",
"-",
"10",
"+",
"2",
":",
"]",
"uo_id",
"=",
"bytes_to_long",
"(",
"from_hex",
"(",
"uo_id_str",
")",
")",
"uo_type",
"=",
"bytes_to_long",
"(",
"from_hex",
"(",
"uo_type_str",
")",
")",
"uo",
"=",
"UO",
"(",
"uo_id",
"=",
"uo_id",
",",
"uo_type",
"=",
"uo_type",
",",
"enc_key",
"=",
"tpl_import_req",
".",
"keys",
"[",
"KeyTypes",
".",
"COMM_ENC",
"]",
".",
"key",
",",
"mac_key",
"=",
"tpl_import_req",
".",
"keys",
"[",
"KeyTypes",
".",
"COMM_MAC",
"]",
".",
"key",
")",
"uo",
".",
"configuration",
"=",
"configuration",
"# Store API key only if it differs from slot.",
"if",
"configuration",
"is",
"not",
"None",
"and",
"configuration",
".",
"api_key",
"!=",
"api_key",
":",
"uo",
".",
"api_key",
"=",
"api_key",
"return",
"uo"
] |
Builds uo from the imported object to the EB.
Imported object = result of CreateUserObject call in EB.
Returns usable uo - you may call ProcessData with it.
|
[
"Builds",
"uo",
"from",
"the",
"imported",
"object",
"to",
"the",
"EB",
".",
"Imported",
"object",
"=",
"result",
"of",
"CreateUserObject",
"call",
"in",
"EB",
".",
"Returns",
"usable",
"uo",
"-",
"you",
"may",
"call",
"ProcessData",
"with",
"it",
"."
] |
0fafe3902da394da88e9f960751d695ca65bbabd
|
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/eb_create_uo.py#L274-L309
|
242,401
|
EnigmaBridge/client.py
|
ebclient/eb_create_uo.py
|
TemplateProcessor.read_serialized_rsa_pub_key
|
def read_serialized_rsa_pub_key(serialized):
    """
    Deserialize an RSA public key from a hex-encoded TLV string.

    Layout: TAG | length (2 bytes) | value, where tag 0x81 carries the
    public exponent and tag 0x82 carries the modulus.

    :param serialized: hex string with the TLV serialization
    :return: tuple (n, e)
    :raises ValueError: when either component is missing
    """
    modulus = None
    exponent = None
    data = from_hex(serialized)
    offset = 0
    total = len(data)
    while offset < total:
        tag = bytes_to_byte(data, offset)
        offset += 1
        value_len = bytes_to_short(data, offset)
        offset += 2
        value = data[offset:offset + value_len]
        offset += value_len
        if tag == 0x81:
            exponent = bytes_to_long(value)
        elif tag == 0x82:
            modulus = bytes_to_long(value)
    if exponent is None or modulus is None:
        logger.warning("Could not process import key")
        raise ValueError('Public key deserialization failed')
    return modulus, exponent
|
python
|
def read_serialized_rsa_pub_key(serialized):
    """
    Deserialize an RSA public key from a hex-encoded TLV string.

    Layout: TAG | length (2 bytes) | value, where tag 0x81 carries the
    public exponent and tag 0x82 carries the modulus.

    :param serialized: hex string with the TLV serialization
    :return: tuple (n, e)
    :raises ValueError: when either component is missing
    """
    modulus = None
    exponent = None
    data = from_hex(serialized)
    offset = 0
    total = len(data)
    while offset < total:
        tag = bytes_to_byte(data, offset)
        offset += 1
        value_len = bytes_to_short(data, offset)
        offset += 2
        value = data[offset:offset + value_len]
        offset += value_len
        if tag == 0x81:
            exponent = bytes_to_long(value)
        elif tag == 0x82:
            modulus = bytes_to_long(value)
    if exponent is None or modulus is None:
        logger.warning("Could not process import key")
        raise ValueError('Public key deserialization failed')
    return modulus, exponent
|
[
"def",
"read_serialized_rsa_pub_key",
"(",
"serialized",
")",
":",
"n",
"=",
"None",
"e",
"=",
"None",
"rsa",
"=",
"from_hex",
"(",
"serialized",
")",
"pos",
"=",
"0",
"ln",
"=",
"len",
"(",
"rsa",
")",
"while",
"pos",
"<",
"ln",
":",
"tag",
"=",
"bytes_to_byte",
"(",
"rsa",
",",
"pos",
")",
"pos",
"+=",
"1",
"length",
"=",
"bytes_to_short",
"(",
"rsa",
",",
"pos",
")",
"pos",
"+=",
"2",
"if",
"tag",
"==",
"0x81",
":",
"e",
"=",
"bytes_to_long",
"(",
"rsa",
"[",
"pos",
":",
"pos",
"+",
"length",
"]",
")",
"elif",
"tag",
"==",
"0x82",
":",
"n",
"=",
"bytes_to_long",
"(",
"rsa",
"[",
"pos",
":",
"pos",
"+",
"length",
"]",
")",
"pos",
"+=",
"length",
"if",
"e",
"is",
"None",
"or",
"n",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Could not process import key\"",
")",
"raise",
"ValueError",
"(",
"'Public key deserialization failed'",
")",
"return",
"n",
",",
"e"
] |
Reads serialized RSA pub key
TAG|len-2B|value. 81 = exponent, 82 = modulus
:param serialized:
:return: n, e
|
[
"Reads",
"serialized",
"RSA",
"pub",
"key",
"TAG|len",
"-",
"2B|value",
".",
"81",
"=",
"exponent",
"82",
"=",
"modulus"
] |
0fafe3902da394da88e9f960751d695ca65bbabd
|
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/eb_create_uo.py#L532-L563
|
242,402
|
hobson/pug-dj
|
pug/dj/crawlnmine/crawlnmine/settings/common.py
|
env
|
def env(var_name, default=False):
    """Return the environment variable's value, or *default* if it is unset.

    Values spelling a falsy constant ('false', 'no', 'off', '0', 'none',
    'null' -- case-insensitive, surrounding whitespace ignored) are
    normalized to None.  When the variable is missing, a warning is issued
    and *default* is returned.
    """
    try:
        value = os.environ[var_name]
    except KeyError:
        # Only a missing variable is expected here; narrower than the
        # original bare `except:` so real errors are not swallowed.
        from traceback import format_exc
        msg = format_exc() + '\n' + "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default)
        warnings.warn(msg)
        return default
    # BUG FIX: the original list contained `'off' '0'` (missing comma),
    # which concatenated to 'off0' so neither 'off' nor '0' ever matched.
    if str(value).strip().lower() in ('false', 'no', 'off', '0', 'none', 'null'):
        return None
    return value
|
python
|
def env(var_name, default=False):
    """Return the environment variable's value, or *default* if it is unset.

    Values spelling a falsy constant ('false', 'no', 'off', '0', 'none',
    'null' -- case-insensitive, surrounding whitespace ignored) are
    normalized to None.  When the variable is missing, a warning is issued
    and *default* is returned.
    """
    try:
        value = os.environ[var_name]
    except KeyError:
        # Only a missing variable is expected here; narrower than the
        # original bare `except:` so real errors are not swallowed.
        from traceback import format_exc
        msg = format_exc() + '\n' + "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default)
        warnings.warn(msg)
        return default
    # BUG FIX: the original list contained `'off' '0'` (missing comma),
    # which concatenated to 'off0' so neither 'off' nor '0' ever matched.
    if str(value).strip().lower() in ('false', 'no', 'off', '0', 'none', 'null'):
        return None
    return value
|
[
"def",
"env",
"(",
"var_name",
",",
"default",
"=",
"False",
")",
":",
"try",
":",
"value",
"=",
"os",
".",
"environ",
"[",
"var_name",
"]",
"if",
"str",
"(",
"value",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"in",
"[",
"'false'",
",",
"'no'",
",",
"'off'",
"'0'",
",",
"'none'",
",",
"'null'",
"]",
":",
"return",
"None",
"return",
"value",
"except",
":",
"from",
"traceback",
"import",
"format_exc",
"msg",
"=",
"format_exc",
"(",
")",
"+",
"'\\n'",
"+",
"\"Unable to find the %s environment variable.\\nUsing the value %s (the default) instead.\\n\"",
"%",
"(",
"var_name",
",",
"default",
")",
"warnings",
".",
"warn",
"(",
"msg",
")",
"return",
"default"
] |
Get the environment variable or assume a default, but let the user know about the error.
|
[
"Get",
"the",
"environment",
"variable",
"or",
"assume",
"a",
"default",
"but",
"let",
"the",
"user",
"know",
"about",
"the",
"error",
"."
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/crawlnmine/settings/common.py#L20-L31
|
242,403
|
saltzm/yadi
|
yadi/datalog2sql/ast2sql/safety_checker.py
|
ConjunctiveQuerySafetyChecker.check_negated_goals
|
def check_negated_goals(self, safe_variables, query):
    ''' Check that every variable occurring in a negated goal is safe.

    :param safe_variables: variables known to be safe (occur in a positive goal)
    :param query: query whose relations are inspected
    :return: True when all negated-goal variables are safe
    :raises NotSafeException: when an unsafe variable is found
    '''
    # Create a list of variables which occur in negated goals.
    variables_in_negated_goals = \
        [y for x in query.relations for y in list(x.variables) if x.is_negated()]
    # And check them:
    for variable in variables_in_negated_goals:
        if variable not in safe_variables:
            # NOTE: the original had an unreachable `return False` after this
            # raise; it was dead code and has been removed.
            raise NotSafeException('Query not safe because ' + \
                    variable.name + \
                    ' from a negated goal does not occur in a positive goal')
    return True
|
python
|
def check_negated_goals(self, safe_variables, query):
    ''' Check that every variable occurring in a negated goal is safe.

    :param safe_variables: variables known to be safe (occur in a positive goal)
    :param query: query whose relations are inspected
    :return: True when all negated-goal variables are safe
    :raises NotSafeException: when an unsafe variable is found
    '''
    # Create a list of variables which occur in negated goals.
    variables_in_negated_goals = \
        [y for x in query.relations for y in list(x.variables) if x.is_negated()]
    # And check them:
    for variable in variables_in_negated_goals:
        if variable not in safe_variables:
            # NOTE: the original had an unreachable `return False` after this
            # raise; it was dead code and has been removed.
            raise NotSafeException('Query not safe because ' + \
                    variable.name + \
                    ' from a negated goal does not occur in a positive goal')
    return True
|
[
"def",
"check_negated_goals",
"(",
"self",
",",
"safe_variables",
",",
"query",
")",
":",
"variables_in_negated_goals",
"=",
"[",
"y",
"for",
"x",
"in",
"query",
".",
"relations",
"for",
"y",
"in",
"list",
"(",
"x",
".",
"variables",
")",
"if",
"x",
".",
"is_negated",
"(",
")",
"]",
"# And check them:",
"for",
"variable",
"in",
"variables_in_negated_goals",
":",
"if",
"not",
"(",
"variable",
"in",
"safe_variables",
")",
":",
"raise",
"NotSafeException",
"(",
"'Query not safe because '",
"+",
"variable",
".",
"name",
"+",
"' from a negated goal does not occur in a positive goal'",
")",
"return",
"False",
"return",
"True"
] |
Create a list of variables which occur in negated goals.
|
[
"Create",
"a",
"list",
"of",
"variables",
"which",
"occur",
"in",
"negated",
"goals",
"."
] |
755790167c350e650c1e8b15c6f9209a97be9e42
|
https://github.com/saltzm/yadi/blob/755790167c350e650c1e8b15c6f9209a97be9e42/yadi/datalog2sql/ast2sql/safety_checker.py#L41-L54
|
242,404
|
saltzm/yadi
|
yadi/datalog2sql/ast2sql/safety_checker.py
|
ConjunctiveQuerySafetyChecker.check_non_equality_explicit_constraints
|
def check_non_equality_explicit_constraints(self, safe_variables, query):
    ''' Check variables occurring in explicit constraints that use a
    non-equality operator: each must also occur in a positive goal.

    :param safe_variables: variables known to be safe (occur in a positive goal)
    :param query: query whose constraints are inspected
    :return: True when all such variables are safe
    :raises NotSafeException: when an unsafe variable is found
    '''
    # Create a list of variables which occur in explicit constraints with non
    # equality operators
    variables_in_constraints_with_non_equality_operators = \
        [y for x in query.constraints \
         for y in [x.get_left_side(), x.get_right_side()] \
         if y.is_variable() and not x.is_equality_constraint()]
    for variable in variables_in_constraints_with_non_equality_operators:
        if variable not in safe_variables:
            # NOTE: the original had an unreachable `return False` after this
            # raise; it was dead code and has been removed.
            raise NotSafeException('Query not safe because ' + \
                    variable.name + \
                    ' from a non_equality comparison does not occur in a positive goal')
    return True
|
python
|
def check_non_equality_explicit_constraints(self, safe_variables, query):
    ''' Check variables occurring in explicit constraints that use a
    non-equality operator: each must also occur in a positive goal.

    :param safe_variables: variables known to be safe (occur in a positive goal)
    :param query: query whose constraints are inspected
    :return: True when all such variables are safe
    :raises NotSafeException: when an unsafe variable is found
    '''
    # Create a list of variables which occur in explicit constraints with non
    # equality operators
    variables_in_constraints_with_non_equality_operators = \
        [y for x in query.constraints \
         for y in [x.get_left_side(), x.get_right_side()] \
         if y.is_variable() and not x.is_equality_constraint()]
    for variable in variables_in_constraints_with_non_equality_operators:
        if variable not in safe_variables:
            # NOTE: the original had an unreachable `return False` after this
            # raise; it was dead code and has been removed.
            raise NotSafeException('Query not safe because ' + \
                    variable.name + \
                    ' from a non_equality comparison does not occur in a positive goal')
    return True
|
[
"def",
"check_non_equality_explicit_constraints",
"(",
"self",
",",
"safe_variables",
",",
"query",
")",
":",
"# Create a list of variables which occur in explicit constraints with non",
"# equality operators",
"variables_in_constraints_with_non_equality_operators",
"=",
"[",
"y",
"for",
"x",
"in",
"query",
".",
"constraints",
"for",
"y",
"in",
"[",
"x",
".",
"get_left_side",
"(",
")",
",",
"x",
".",
"get_right_side",
"(",
")",
"]",
"if",
"y",
".",
"is_variable",
"(",
")",
"and",
"not",
"x",
".",
"is_equality_constraint",
"(",
")",
"]",
"for",
"variable",
"in",
"variables_in_constraints_with_non_equality_operators",
":",
"if",
"not",
"(",
"variable",
"in",
"safe_variables",
")",
":",
"raise",
"NotSafeException",
"(",
"'Query not safe because '",
"+",
"variable",
".",
"name",
"+",
"' from a non_equality comparison does not occur in a positive goal'",
")",
"return",
"False",
"return",
"True"
] |
Checking variables which occur in explicit constraints with non equality
operators
|
[
"Checking",
"variables",
"which",
"occur",
"in",
"explicit",
"constraints",
"with",
"non",
"equality",
"operators"
] |
755790167c350e650c1e8b15c6f9209a97be9e42
|
https://github.com/saltzm/yadi/blob/755790167c350e650c1e8b15c6f9209a97be9e42/yadi/datalog2sql/ast2sql/safety_checker.py#L56-L73
|
242,405
|
tax/snor
|
snor/utils.py
|
create_database
|
def create_database(destroy_existing=False):
    """ Create db and tables if it doesn't exist.

    :param destroy_existing: when True, delete any existing database file
        first so the schema is rebuilt from scratch.  (The original
        accepted this flag but silently ignored it.)
    """
    if destroy_existing and os.path.exists(DB_NAME):
        logger.info('Destroy existing database: {0}'.format(DB_NAME))
        os.remove(DB_NAME)
    if not os.path.exists(DB_NAME):
        logger.info('Create database: {0}'.format(DB_NAME))
        # Touch the file so the tables below have somewhere to live.
        open(DB_NAME, 'a').close()
    Show.create_table()
    Episode.create_table()
    Setting.create_table()
|
python
|
def create_database(destroy_existing=False):
    """ Create db and tables if it doesn't exist.

    :param destroy_existing: when True, delete any existing database file
        first so the schema is rebuilt from scratch.  (The original
        accepted this flag but silently ignored it.)
    """
    if destroy_existing and os.path.exists(DB_NAME):
        logger.info('Destroy existing database: {0}'.format(DB_NAME))
        os.remove(DB_NAME)
    if not os.path.exists(DB_NAME):
        logger.info('Create database: {0}'.format(DB_NAME))
        # Touch the file so the tables below have somewhere to live.
        open(DB_NAME, 'a').close()
    Show.create_table()
    Episode.create_table()
    Setting.create_table()
|
[
"def",
"create_database",
"(",
"destroy_existing",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"DB_NAME",
")",
":",
"logger",
".",
"info",
"(",
"'Create database: {0}'",
".",
"format",
"(",
"DB_NAME",
")",
")",
"open",
"(",
"DB_NAME",
",",
"'a'",
")",
".",
"close",
"(",
")",
"Show",
".",
"create_table",
"(",
")",
"Episode",
".",
"create_table",
"(",
")",
"Setting",
".",
"create_table",
"(",
")"
] |
Create db and tables if it doesn't exist
|
[
"Create",
"db",
"and",
"tables",
"if",
"it",
"doesn",
"t",
"exist"
] |
0d639bd6913066db743a64b4a14a384024e4ab7d
|
https://github.com/tax/snor/blob/0d639bd6913066db743a64b4a14a384024e4ab7d/snor/utils.py#L216-L223
|
242,406
|
kryptn/Pantry
|
demo.py
|
magic_api
|
def magic_api(word):
    """
    Simulated "magic" API.

    Derives a pseudo-random score from *word* (one random draw per
    character) and pairs it with a cache-expiry timestamp that many
    seconds in the future.
    """
    score = 0
    for ch in word:
        score += ord(ch) - 65 + randint(1, 50)
    expires = datetime.now() + timedelta(seconds=score)
    return score, expires
|
python
|
def magic_api(word):
    """
    Simulated "magic" API.

    Derives a pseudo-random score from *word* (one random draw per
    character) and pairs it with a cache-expiry timestamp that many
    seconds in the future.
    """
    score = 0
    for ch in word:
        score += ord(ch) - 65 + randint(1, 50)
    expires = datetime.now() + timedelta(seconds=score)
    return score, expires
|
[
"def",
"magic_api",
"(",
"word",
")",
":",
"result",
"=",
"sum",
"(",
"ord",
"(",
"x",
")",
"-",
"65",
"+",
"randint",
"(",
"1",
",",
"50",
")",
"for",
"x",
"in",
"word",
")",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"result",
")",
"cached_until",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"delta",
"return",
"result",
",",
"cached_until"
] |
This is our magic API that we're simulating.
It'll return a random number and a cache timer.
|
[
"This",
"is",
"our",
"magic",
"API",
"that",
"we",
"re",
"simulating",
".",
"It",
"ll",
"return",
"a",
"random",
"number",
"and",
"a",
"cache",
"timer",
"."
] |
2e1c30f8b04127c4fd0e42451a3047d1300ac45f
|
https://github.com/kryptn/Pantry/blob/2e1c30f8b04127c4fd0e42451a3047d1300ac45f/demo.py#L6-L15
|
242,407
|
hobson/pug-invest
|
pug/invest/sandbox/sim.py
|
portfolio_prices
|
def portfolio_prices(
        symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
        start=datetime.datetime(2005, 1, 1),
        end=datetime.datetime(2011, 12, 31),  # data stops at 2013/1/1
        normalize=True,
        allocation=None,
        price_type='actual_close',
        ):
    """Calculate the time series of total portfolio value for a set of equities
    Arguments:
      symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
      start (datetime): The date at the start of the period being analyzed.
      end (datetime): The date at the end of the period being analyzed.
      normalize (bool): Whether to normalize prices to 1 at the start of the time series.
      allocation (list of float): The portion of the portfolio allocated to each equity.
    Returns:
      np.ndarray: allocation-weighted sum of the (optionally normalized) prices per day
    """
    symbols = normalize_symbols(symbols)
    start = util.normalize_date(start)
    end = util.normalize_date(end)

    # Default to an equal-weight portfolio; pad a short allocation list with
    # equal weights for the remaining symbols.
    if allocation is None:
        allocation = [1. / len(symbols)] * len(symbols)
    if len(allocation) < len(symbols):
        allocation = list(allocation) + [1. / len(symbols)] * (len(symbols) - len(allocation))

    # BUG FIX: was `np.sum(allocation.sum)` -- `allocation` is a plain list,
    # which has no `.sum` attribute, so that line raised AttributeError.
    total = np.sum(allocation)
    allocation = np.array([(float(a) / total) for a in allocation])

    # 16:00 = NYSE close.
    timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    ls_keys = [price_type]
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data[price_type].values
    if normalize:
        na_price /= na_price[0, :]
    na_price *= allocation
    return np.sum(na_price, axis=1)
|
python
|
def portfolio_prices(
        symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
        start=datetime.datetime(2005, 1, 1),
        end=datetime.datetime(2011, 12, 31),  # data stops at 2013/1/1
        normalize=True,
        allocation=None,
        price_type='actual_close',
        ):
    """Calculate the time series of total portfolio value for a set of equities
    Arguments:
      symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
      start (datetime): The date at the start of the period being analyzed.
      end (datetime): The date at the end of the period being analyzed.
      normalize (bool): Whether to normalize prices to 1 at the start of the time series.
      allocation (list of float): The portion of the portfolio allocated to each equity.
    Returns:
      np.ndarray: allocation-weighted sum of the (optionally normalized) prices per day
    """
    symbols = normalize_symbols(symbols)
    start = util.normalize_date(start)
    end = util.normalize_date(end)

    # Default to an equal-weight portfolio; pad a short allocation list with
    # equal weights for the remaining symbols.
    if allocation is None:
        allocation = [1. / len(symbols)] * len(symbols)
    if len(allocation) < len(symbols):
        allocation = list(allocation) + [1. / len(symbols)] * (len(symbols) - len(allocation))

    # BUG FIX: was `np.sum(allocation.sum)` -- `allocation` is a plain list,
    # which has no `.sum` attribute, so that line raised AttributeError.
    total = np.sum(allocation)
    allocation = np.array([(float(a) / total) for a in allocation])

    # 16:00 = NYSE close.
    timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    ls_keys = [price_type]
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data[price_type].values
    if normalize:
        na_price /= na_price[0, :]
    na_price *= allocation
    return np.sum(na_price, axis=1)
|
[
"def",
"portfolio_prices",
"(",
"symbols",
"=",
"(",
"\"AAPL\"",
",",
"\"GLD\"",
",",
"\"GOOG\"",
",",
"\"$SPX\"",
",",
"\"XOM\"",
",",
"\"msft\"",
")",
",",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"2005",
",",
"1",
",",
"1",
")",
",",
"end",
"=",
"datetime",
".",
"datetime",
"(",
"2011",
",",
"12",
",",
"31",
")",
",",
"# data stops at 2013/1/1",
"normalize",
"=",
"True",
",",
"allocation",
"=",
"None",
",",
"price_type",
"=",
"'actual_close'",
",",
")",
":",
"symbols",
"=",
"normalize_symbols",
"(",
"symbols",
")",
"start",
"=",
"util",
".",
"normalize_date",
"(",
"start",
")",
"end",
"=",
"util",
".",
"normalize_date",
"(",
"end",
")",
"if",
"allocation",
"is",
"None",
":",
"allocation",
"=",
"[",
"1.",
"/",
"len",
"(",
"symbols",
")",
"]",
"*",
"len",
"(",
"symbols",
")",
"if",
"len",
"(",
"allocation",
")",
"<",
"len",
"(",
"symbols",
")",
":",
"allocation",
"=",
"list",
"(",
"allocation",
")",
"+",
"[",
"1.",
"/",
"len",
"(",
"symbols",
")",
"]",
"*",
"(",
"len",
"(",
"symbols",
")",
"-",
"len",
"(",
"allocation",
")",
")",
"total",
"=",
"np",
".",
"sum",
"(",
"allocation",
".",
"sum",
")",
"allocation",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"float",
"(",
"a",
")",
"/",
"total",
")",
"for",
"a",
"in",
"allocation",
"]",
")",
"timestamps",
"=",
"du",
".",
"getNYSEdays",
"(",
"start",
",",
"end",
",",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"16",
")",
")",
"ls_keys",
"=",
"[",
"price_type",
"]",
"ldf_data",
"=",
"da",
".",
"get_data",
"(",
"timestamps",
",",
"symbols",
",",
"ls_keys",
")",
"d_data",
"=",
"dict",
"(",
"zip",
"(",
"ls_keys",
",",
"ldf_data",
")",
")",
"na_price",
"=",
"d_data",
"[",
"price_type",
"]",
".",
"values",
"if",
"normalize",
":",
"na_price",
"/=",
"na_price",
"[",
"0",
",",
":",
"]",
"na_price",
"*=",
"allocation",
"return",
"np",
".",
"sum",
"(",
"na_price",
",",
"axis",
"=",
"1",
")"
] |
Calculate the Sharpe Ratio and other performance metrics for a portfolio
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
allocation (list of float): The portion of the portfolio allocated to each equity.
|
[
"Calculate",
"the",
"Sharpe",
"Ratio",
"and",
"other",
"performance",
"metrics",
"for",
"a",
"portfolio"
] |
836911258a0e920083a88c91beae88eefdebb20c
|
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L216-L253
|
242,408
|
hobson/pug-invest
|
pug/invest/sandbox/sim.py
|
symbol_bollinger
|
def symbol_bollinger(symbol='GOOG',
                  start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='close', cleaner=clean_dataframe,
                  window=20, sigma=1.):
    """Compute the Bollinger indicator series for a single symbol
    >>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1]  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    -1.8782...
    """
    normalized = normalize_symbols(symbol)
    frame = price_dataframe(normalized, start=start, end=end, price_type=price_type, cleaner=cleaner)
    # Only the first (sole) symbol's price series is analyzed.
    series = frame[normalized[0]]
    return series_bollinger(series, window=window, sigma=sigma, plot=False)
|
python
|
def symbol_bollinger(symbol='GOOG',
                  start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='close', cleaner=clean_dataframe,
                  window=20, sigma=1.):
    """Compute the Bollinger indicator series for a single symbol
    >>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1]  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    -1.8782...
    """
    normalized = normalize_symbols(symbol)
    frame = price_dataframe(normalized, start=start, end=end, price_type=price_type, cleaner=cleaner)
    # Only the first (sole) symbol's price series is analyzed.
    series = frame[normalized[0]]
    return series_bollinger(series, window=window, sigma=sigma, plot=False)
|
[
"def",
"symbol_bollinger",
"(",
"symbol",
"=",
"'GOOG'",
",",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"2008",
",",
"1",
",",
"1",
")",
",",
"end",
"=",
"datetime",
".",
"datetime",
"(",
"2009",
",",
"12",
",",
"31",
")",
",",
"price_type",
"=",
"'close'",
",",
"cleaner",
"=",
"clean_dataframe",
",",
"window",
"=",
"20",
",",
"sigma",
"=",
"1.",
")",
":",
"symbols",
"=",
"normalize_symbols",
"(",
"symbol",
")",
"prices",
"=",
"price_dataframe",
"(",
"symbols",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"price_type",
"=",
"price_type",
",",
"cleaner",
"=",
"cleaner",
")",
"return",
"series_bollinger",
"(",
"prices",
"[",
"symbols",
"[",
"0",
"]",
"]",
",",
"window",
"=",
"window",
",",
"sigma",
"=",
"sigma",
",",
"plot",
"=",
"False",
")"
] |
Calculate the Bolinger indicator value
>>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-1.8782...
|
[
"Calculate",
"the",
"Bolinger",
"indicator",
"value"
] |
836911258a0e920083a88c91beae88eefdebb20c
|
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L263-L273
|
242,409
|
hobson/pug-invest
|
pug/invest/sandbox/sim.py
|
symbols_bollinger
|
def symbols_bollinger(symbols='sp5002012',
                  start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='adjusted_close', cleaner=clean_dataframe,
                  window=20, sigma=1.):
    """Compute the Bollinger indicator for a list or set of symbols
    Example:
    >>> symbols_bollinger(["AAPL", "GOOG", "IBM", "MSFT"], '10-12-01', '10-12-30')[-5:]  # doctest: +NORMALIZE_WHITESPACE
    GOOG AAPL IBM MSFT
    2010-12-23 16:00:00 1.298178 1.185009 1.177220 1.237684
    2010-12-27 16:00:00 1.073603 1.371298 0.590403 0.932911
    2010-12-28 16:00:00 0.745548 1.436278 0.863406 0.812844
    2010-12-29 16:00:00 0.874885 1.464894 2.096242 0.752602
    2010-12-30 16:00:00 0.634661 0.793493 1.959324 0.498395
    """
    normalized = normalize_symbols(symbols)
    frame = price_dataframe(normalized, start=start, end=end, price_type=price_type, cleaner=cleaner)
    return frame_bollinger(frame, window=window, sigma=sigma, plot=False)
|
python
|
def symbols_bollinger(symbols='sp5002012',
                  start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='adjusted_close', cleaner=clean_dataframe,
                  window=20, sigma=1.):
    """Compute the Bollinger indicator for a list or set of symbols
    Example:
    >>> symbols_bollinger(["AAPL", "GOOG", "IBM", "MSFT"], '10-12-01', '10-12-30')[-5:]  # doctest: +NORMALIZE_WHITESPACE
    GOOG AAPL IBM MSFT
    2010-12-23 16:00:00 1.298178 1.185009 1.177220 1.237684
    2010-12-27 16:00:00 1.073603 1.371298 0.590403 0.932911
    2010-12-28 16:00:00 0.745548 1.436278 0.863406 0.812844
    2010-12-29 16:00:00 0.874885 1.464894 2.096242 0.752602
    2010-12-30 16:00:00 0.634661 0.793493 1.959324 0.498395
    """
    normalized = normalize_symbols(symbols)
    frame = price_dataframe(normalized, start=start, end=end, price_type=price_type, cleaner=cleaner)
    return frame_bollinger(frame, window=window, sigma=sigma, plot=False)
|
[
"def",
"symbols_bollinger",
"(",
"symbols",
"=",
"'sp5002012'",
",",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"2008",
",",
"1",
",",
"1",
")",
",",
"end",
"=",
"datetime",
".",
"datetime",
"(",
"2009",
",",
"12",
",",
"31",
")",
",",
"price_type",
"=",
"'adjusted_close'",
",",
"cleaner",
"=",
"clean_dataframe",
",",
"window",
"=",
"20",
",",
"sigma",
"=",
"1.",
")",
":",
"symbols",
"=",
"normalize_symbols",
"(",
"symbols",
")",
"prices",
"=",
"price_dataframe",
"(",
"symbols",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"price_type",
"=",
"price_type",
",",
"cleaner",
"=",
"cleaner",
")",
"return",
"frame_bollinger",
"(",
"prices",
",",
"window",
"=",
"window",
",",
"sigma",
"=",
"sigma",
",",
"plot",
"=",
"False",
")"
] |
Calculate the Bolinger for a list or set of symbols
Example:
>>> symbols_bollinger(["AAPL", "GOOG", "IBM", "MSFT"], '10-12-01', '10-12-30')[-5:] # doctest: +NORMALIZE_WHITESPACE
GOOG AAPL IBM MSFT
2010-12-23 16:00:00 1.298178 1.185009 1.177220 1.237684
2010-12-27 16:00:00 1.073603 1.371298 0.590403 0.932911
2010-12-28 16:00:00 0.745548 1.436278 0.863406 0.812844
2010-12-29 16:00:00 0.874885 1.464894 2.096242 0.752602
2010-12-30 16:00:00 0.634661 0.793493 1.959324 0.498395
|
[
"Calculate",
"the",
"Bolinger",
"for",
"a",
"list",
"or",
"set",
"of",
"symbols"
] |
836911258a0e920083a88c91beae88eefdebb20c
|
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L276-L292
|
242,410
|
hobson/pug-invest
|
pug/invest/sandbox/sim.py
|
metrics
|
def metrics(prices, fudge=False, sharpe_days=252., baseline='$SPX'):
    """Calculate the volatiliy, average daily return, Sharpe ratio, and cumulative return
    Arguments:
      prices (file or basestring or iterable): path to file or file pointer or sequence of prices/values of a portfolio or equity
      fudge (bool): Whether to use Tucker Balche's erroneous division by N or the more accurate N-1 for stddev of returns
      sharpe_days: Number of trading days in a year. Sharpe ratio = sqrt(sharpe_days) * total_return / std_dev_of_daily_returns
    Examples:
      >>> metrics(np.array([1,2,3,4])) == {'mean': 0.61111111111111105, 'return': 4.0, 'sharpe': 34.245718429742873, 'std': 0.28327886186626583}
      True
      >>> metrics(portfolio_prices(symbols=['AAPL', 'GLD', 'GOOG', 'XOM'], start=datetime.datetime(2011,1,1), end=datetime.datetime(2011,12,31), allocations=[0.4, 0.4, 0.0, 0.2])
      ... ) == {'std': 0.0101467067654, 'mean': 0.000657261102001, 'sharpe': 1.02828403099, 'return': 1.16487261965}
      True
    """
    # NOTE(review): Python 2 code (`basestring`, `file`, `print` statement below).
    # NOTE(review): the docstring examples use short keys ('mean', 'std', ...)
    # while the dict built below uses long descriptive keys — the examples
    # appear stale; confirm against callers.
    # NOTE(review): the `baseline` parameter is accepted but never used.
    # A string path is opened; a file object is parsed as CSV keyed on the
    # first three columns (presumably a date triple — confirm), taking the
    # last column as the value, sorted chronologically by that key.
    if isinstance(prices, basestring) and os.path.isfile(prices):
        prices = open(prices, 'rU')
    if isinstance(prices, file):
        values = {}
        csvreader = csv.reader(prices, dialect='excel', quoting=csv.QUOTE_MINIMAL)
        for row in csvreader:
            # print row
            values[tuple(int(s) for s in row[:3])] = row[-1]
        prices.close()
        prices = [v for (k,v) in sorted(values.items())]
        print prices
    # Rows of tuples/lists collapse to their last column (the price).
    if isinstance(prices[0], (tuple, list)):
        prices = [row[-1] for row in prices]
    # None => annualize over the sample length instead of 252 trading days.
    if sharpe_days == None:
        sharpe_days = len(prices)
    prices = np.array([float(p) for p in prices])
    # `fudge` selects the variance scaling: a non-bool truthy value is used
    # as an explicit factor; True (or float 0.0) applies (N-1)/N; anything
    # else leaves the variance unscaled (factor 1.0).
    if not isinstance(fudge, bool) and fudge:
        fudge = float(fudge)
    elif fudge == True or (isinstance(fudge, float) and fudge == 0.0):
        fudge = (len(prices) - 1.) / len(prices)
    else:
        fudge = 1.0
    # Simple (arithmetic) daily returns: (p[t+1] - p[t]) / p[t].
    daily_returns = np.diff(prices) / prices[0:-1]
    # print daily_returns
    end_price = float(prices[-1])
    start_price = (prices[0])
    mean = fudge * np.average(daily_returns)
    variance = fudge * np.sum((daily_returns - mean) * (daily_returns - mean)) / float(len(daily_returns))
    # NOTE(review): 'standared' is a typo preserved in the returned key;
    # renaming it would break any caller that looks it up.
    results = {
        'standared deviation of daily returns': math.sqrt(variance),
        'variance of daily returns': variance,
        'average daily return': mean,
        'Sharpe ratio': mean * np.sqrt(sharpe_days) / np.sqrt(variance),
        'total return': end_price / start_price,
        'final value': end_price,
        'starting value': start_price,
    }
    results['return rate'] = results['total return'] - 1.0
    return results
|
python
|
def metrics(prices, fudge=False, sharpe_days=252., baseline='$SPX'):
"""Calculate the volatiliy, average daily return, Sharpe ratio, and cumulative return
Arguments:
prices (file or basestring or iterable): path to file or file pointer or sequence of prices/values of a portfolio or equity
fudge (bool): Whether to use Tucker Balche's erroneous division by N or the more accurate N-1 for stddev of returns
sharpe_days: Number of trading days in a year. Sharpe ratio = sqrt(sharpe_days) * total_return / std_dev_of_daily_returns
Examples:
>>> metrics(np.array([1,2,3,4])) == {'mean': 0.61111111111111105, 'return': 4.0, 'sharpe': 34.245718429742873, 'std': 0.28327886186626583}
True
>>> metrics(portfolio_prices(symbols=['AAPL', 'GLD', 'GOOG', 'XOM'], start=datetime.datetime(2011,1,1), end=datetime.datetime(2011,12,31), allocations=[0.4, 0.4, 0.0, 0.2])
... ) == {'std': 0.0101467067654, 'mean': 0.000657261102001, 'sharpe': 1.02828403099, 'return': 1.16487261965}
True
"""
if isinstance(prices, basestring) and os.path.isfile(prices):
prices = open(prices, 'rU')
if isinstance(prices, file):
values = {}
csvreader = csv.reader(prices, dialect='excel', quoting=csv.QUOTE_MINIMAL)
for row in csvreader:
# print row
values[tuple(int(s) for s in row[:3])] = row[-1]
prices.close()
prices = [v for (k,v) in sorted(values.items())]
print prices
if isinstance(prices[0], (tuple, list)):
prices = [row[-1] for row in prices]
if sharpe_days == None:
sharpe_days = len(prices)
prices = np.array([float(p) for p in prices])
if not isinstance(fudge, bool) and fudge:
fudge = float(fudge)
elif fudge == True or (isinstance(fudge, float) and fudge == 0.0):
fudge = (len(prices) - 1.) / len(prices)
else:
fudge = 1.0
daily_returns = np.diff(prices) / prices[0:-1]
# print daily_returns
end_price = float(prices[-1])
start_price = (prices[0])
mean = fudge * np.average(daily_returns)
variance = fudge * np.sum((daily_returns - mean) * (daily_returns - mean)) / float(len(daily_returns))
results = {
'standared deviation of daily returns': math.sqrt(variance),
'variance of daily returns': variance,
'average daily return': mean,
'Sharpe ratio': mean * np.sqrt(sharpe_days) / np.sqrt(variance),
'total return': end_price / start_price,
'final value': end_price,
'starting value': start_price,
}
results['return rate'] = results['total return'] - 1.0
return results
|
[
"def",
"metrics",
"(",
"prices",
",",
"fudge",
"=",
"False",
",",
"sharpe_days",
"=",
"252.",
",",
"baseline",
"=",
"'$SPX'",
")",
":",
"if",
"isinstance",
"(",
"prices",
",",
"basestring",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"prices",
")",
":",
"prices",
"=",
"open",
"(",
"prices",
",",
"'rU'",
")",
"if",
"isinstance",
"(",
"prices",
",",
"file",
")",
":",
"values",
"=",
"{",
"}",
"csvreader",
"=",
"csv",
".",
"reader",
"(",
"prices",
",",
"dialect",
"=",
"'excel'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"for",
"row",
"in",
"csvreader",
":",
"# print row",
"values",
"[",
"tuple",
"(",
"int",
"(",
"s",
")",
"for",
"s",
"in",
"row",
"[",
":",
"3",
"]",
")",
"]",
"=",
"row",
"[",
"-",
"1",
"]",
"prices",
".",
"close",
"(",
")",
"prices",
"=",
"[",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"sorted",
"(",
"values",
".",
"items",
"(",
")",
")",
"]",
"print",
"prices",
"if",
"isinstance",
"(",
"prices",
"[",
"0",
"]",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"prices",
"=",
"[",
"row",
"[",
"-",
"1",
"]",
"for",
"row",
"in",
"prices",
"]",
"if",
"sharpe_days",
"==",
"None",
":",
"sharpe_days",
"=",
"len",
"(",
"prices",
")",
"prices",
"=",
"np",
".",
"array",
"(",
"[",
"float",
"(",
"p",
")",
"for",
"p",
"in",
"prices",
"]",
")",
"if",
"not",
"isinstance",
"(",
"fudge",
",",
"bool",
")",
"and",
"fudge",
":",
"fudge",
"=",
"float",
"(",
"fudge",
")",
"elif",
"fudge",
"==",
"True",
"or",
"(",
"isinstance",
"(",
"fudge",
",",
"float",
")",
"and",
"fudge",
"==",
"0.0",
")",
":",
"fudge",
"=",
"(",
"len",
"(",
"prices",
")",
"-",
"1.",
")",
"/",
"len",
"(",
"prices",
")",
"else",
":",
"fudge",
"=",
"1.0",
"daily_returns",
"=",
"np",
".",
"diff",
"(",
"prices",
")",
"/",
"prices",
"[",
"0",
":",
"-",
"1",
"]",
"# print daily_returns",
"end_price",
"=",
"float",
"(",
"prices",
"[",
"-",
"1",
"]",
")",
"start_price",
"=",
"(",
"prices",
"[",
"0",
"]",
")",
"mean",
"=",
"fudge",
"*",
"np",
".",
"average",
"(",
"daily_returns",
")",
"variance",
"=",
"fudge",
"*",
"np",
".",
"sum",
"(",
"(",
"daily_returns",
"-",
"mean",
")",
"*",
"(",
"daily_returns",
"-",
"mean",
")",
")",
"/",
"float",
"(",
"len",
"(",
"daily_returns",
")",
")",
"results",
"=",
"{",
"'standared deviation of daily returns'",
":",
"math",
".",
"sqrt",
"(",
"variance",
")",
",",
"'variance of daily returns'",
":",
"variance",
",",
"'average daily return'",
":",
"mean",
",",
"'Sharpe ratio'",
":",
"mean",
"*",
"np",
".",
"sqrt",
"(",
"sharpe_days",
")",
"/",
"np",
".",
"sqrt",
"(",
"variance",
")",
",",
"'total return'",
":",
"end_price",
"/",
"start_price",
",",
"'final value'",
":",
"end_price",
",",
"'starting value'",
":",
"start_price",
",",
"}",
"results",
"[",
"'return rate'",
"]",
"=",
"results",
"[",
"'total return'",
"]",
"-",
"1.0",
"return",
"results"
] |
Calculate the volatiliy, average daily return, Sharpe ratio, and cumulative return
Arguments:
prices (file or basestring or iterable): path to file or file pointer or sequence of prices/values of a portfolio or equity
fudge (bool): Whether to use Tucker Balche's erroneous division by N or the more accurate N-1 for stddev of returns
sharpe_days: Number of trading days in a year. Sharpe ratio = sqrt(sharpe_days) * total_return / std_dev_of_daily_returns
Examples:
>>> metrics(np.array([1,2,3,4])) == {'mean': 0.61111111111111105, 'return': 4.0, 'sharpe': 34.245718429742873, 'std': 0.28327886186626583}
True
>>> metrics(portfolio_prices(symbols=['AAPL', 'GLD', 'GOOG', 'XOM'], start=datetime.datetime(2011,1,1), end=datetime.datetime(2011,12,31), allocations=[0.4, 0.4, 0.0, 0.2])
... ) == {'std': 0.0101467067654, 'mean': 0.000657261102001, 'sharpe': 1.02828403099, 'return': 1.16487261965}
True
|
[
"Calculate",
"the",
"volatiliy",
"average",
"daily",
"return",
"Sharpe",
"ratio",
"and",
"cumulative",
"return"
] |
836911258a0e920083a88c91beae88eefdebb20c
|
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L319-L372
|
242,411
|
hobson/pug-invest
|
pug/invest/sandbox/sim.py
|
buy_on_drop
|
def buy_on_drop(symbol_set="sp5002012",
dataobj=dataobj,
start=datetime.datetime(2008, 1, 3),
end=datetime.datetime(2009, 12, 28),
market_sym='$SPX',
threshold=6,
sell_delay=5,
):
'''Compute and display an "event profile" for multiple sets of symbols'''
if symbol_set:
if isinstance(symbol_set, basestring):
if symbol_set.lower().startswith('sp'):
symbol_set = dataobj.get_symbols_from_list(symbol_set.lower())
else:
symbol_set = [sym.stip().upper() for sym in symbol_set.split(",")]
else:
symbol_set = dataobj.get_symbols_from_list("sp5002012")
if market_sym:
symbol_set.append(market_sym)
print "Starting Event Study, retrieving data for the {0} symbol list...".format(symbol_set)
market_data = get_clean_prices(symbol_set, dataobj=dataobj, start=start, end=end)
print "Finding events for {0} symbols between {1} and {2}...".format(len(symbol_set), start, end)
trigger_kwargs={'threshold': threshold}
events = find_events(symbol_set, market_data, market_sym=market_sym, trigger=drop_below, trigger_kwargs=trigger_kwargs)
csvwriter = csv.writer(getattr(args, 'outfile', open('buy_on_drop_outfile.csv', 'w')), dialect='excel', quoting=csv.QUOTE_MINIMAL)
for order in generate_orders(events, sell_delay=sell_delay, sep=None):
csvwriter.writerow(order)
print "Creating Study report for {0} events...".format(len(events))
ep.eventprofiler(events, market_data,
i_lookback=20, i_lookforward=20,
s_filename='Event report--buy on drop below {0} for {1} symbols.pdf'.format(threshold, len(symbol_set)),
b_market_neutral=True,
b_errorbars=True,
s_market_sym=market_sym,
)
return events
|
python
|
def buy_on_drop(symbol_set="sp5002012",
dataobj=dataobj,
start=datetime.datetime(2008, 1, 3),
end=datetime.datetime(2009, 12, 28),
market_sym='$SPX',
threshold=6,
sell_delay=5,
):
'''Compute and display an "event profile" for multiple sets of symbols'''
if symbol_set:
if isinstance(symbol_set, basestring):
if symbol_set.lower().startswith('sp'):
symbol_set = dataobj.get_symbols_from_list(symbol_set.lower())
else:
symbol_set = [sym.stip().upper() for sym in symbol_set.split(",")]
else:
symbol_set = dataobj.get_symbols_from_list("sp5002012")
if market_sym:
symbol_set.append(market_sym)
print "Starting Event Study, retrieving data for the {0} symbol list...".format(symbol_set)
market_data = get_clean_prices(symbol_set, dataobj=dataobj, start=start, end=end)
print "Finding events for {0} symbols between {1} and {2}...".format(len(symbol_set), start, end)
trigger_kwargs={'threshold': threshold}
events = find_events(symbol_set, market_data, market_sym=market_sym, trigger=drop_below, trigger_kwargs=trigger_kwargs)
csvwriter = csv.writer(getattr(args, 'outfile', open('buy_on_drop_outfile.csv', 'w')), dialect='excel', quoting=csv.QUOTE_MINIMAL)
for order in generate_orders(events, sell_delay=sell_delay, sep=None):
csvwriter.writerow(order)
print "Creating Study report for {0} events...".format(len(events))
ep.eventprofiler(events, market_data,
i_lookback=20, i_lookforward=20,
s_filename='Event report--buy on drop below {0} for {1} symbols.pdf'.format(threshold, len(symbol_set)),
b_market_neutral=True,
b_errorbars=True,
s_market_sym=market_sym,
)
return events
|
[
"def",
"buy_on_drop",
"(",
"symbol_set",
"=",
"\"sp5002012\"",
",",
"dataobj",
"=",
"dataobj",
",",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"2008",
",",
"1",
",",
"3",
")",
",",
"end",
"=",
"datetime",
".",
"datetime",
"(",
"2009",
",",
"12",
",",
"28",
")",
",",
"market_sym",
"=",
"'$SPX'",
",",
"threshold",
"=",
"6",
",",
"sell_delay",
"=",
"5",
",",
")",
":",
"if",
"symbol_set",
":",
"if",
"isinstance",
"(",
"symbol_set",
",",
"basestring",
")",
":",
"if",
"symbol_set",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'sp'",
")",
":",
"symbol_set",
"=",
"dataobj",
".",
"get_symbols_from_list",
"(",
"symbol_set",
".",
"lower",
"(",
")",
")",
"else",
":",
"symbol_set",
"=",
"[",
"sym",
".",
"stip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"sym",
"in",
"symbol_set",
".",
"split",
"(",
"\",\"",
")",
"]",
"else",
":",
"symbol_set",
"=",
"dataobj",
".",
"get_symbols_from_list",
"(",
"\"sp5002012\"",
")",
"if",
"market_sym",
":",
"symbol_set",
".",
"append",
"(",
"market_sym",
")",
"print",
"\"Starting Event Study, retrieving data for the {0} symbol list...\"",
".",
"format",
"(",
"symbol_set",
")",
"market_data",
"=",
"get_clean_prices",
"(",
"symbol_set",
",",
"dataobj",
"=",
"dataobj",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
")",
"print",
"\"Finding events for {0} symbols between {1} and {2}...\"",
".",
"format",
"(",
"len",
"(",
"symbol_set",
")",
",",
"start",
",",
"end",
")",
"trigger_kwargs",
"=",
"{",
"'threshold'",
":",
"threshold",
"}",
"events",
"=",
"find_events",
"(",
"symbol_set",
",",
"market_data",
",",
"market_sym",
"=",
"market_sym",
",",
"trigger",
"=",
"drop_below",
",",
"trigger_kwargs",
"=",
"trigger_kwargs",
")",
"csvwriter",
"=",
"csv",
".",
"writer",
"(",
"getattr",
"(",
"args",
",",
"'outfile'",
",",
"open",
"(",
"'buy_on_drop_outfile.csv'",
",",
"'w'",
")",
")",
",",
"dialect",
"=",
"'excel'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"for",
"order",
"in",
"generate_orders",
"(",
"events",
",",
"sell_delay",
"=",
"sell_delay",
",",
"sep",
"=",
"None",
")",
":",
"csvwriter",
".",
"writerow",
"(",
"order",
")",
"print",
"\"Creating Study report for {0} events...\"",
".",
"format",
"(",
"len",
"(",
"events",
")",
")",
"ep",
".",
"eventprofiler",
"(",
"events",
",",
"market_data",
",",
"i_lookback",
"=",
"20",
",",
"i_lookforward",
"=",
"20",
",",
"s_filename",
"=",
"'Event report--buy on drop below {0} for {1} symbols.pdf'",
".",
"format",
"(",
"threshold",
",",
"len",
"(",
"symbol_set",
")",
")",
",",
"b_market_neutral",
"=",
"True",
",",
"b_errorbars",
"=",
"True",
",",
"s_market_sym",
"=",
"market_sym",
",",
")",
"return",
"events"
] |
Compute and display an "event profile" for multiple sets of symbols
|
[
"Compute",
"and",
"display",
"an",
"event",
"profile",
"for",
"multiple",
"sets",
"of",
"symbols"
] |
836911258a0e920083a88c91beae88eefdebb20c
|
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L437-L475
|
242,412
|
hobson/pug-invest
|
pug/invest/sandbox/sim.py
|
generate_orders
|
def generate_orders(events, sell_delay=5, sep=','):
"""Generate CSV orders based on events indicated in a DataFrame
Arguments:
events (pandas.DataFrame): Table of NaNs or 1's, one column for each symbol.
1 indicates a BUY event. -1 a SELL event. nan or 0 is a nonevent.
sell_delay (float): Number of days to wait before selling back the shares bought
sep (str or None): if sep is None, orders will be returns as tuples of `int`s, `float`s, and `str`s
otherwise the separator will be used to join the order parameters into the yielded str
Returns:
generator of str: yielded CSV rows in the format (yr, mo, day, symbol, Buy/Sell, shares)
"""
sell_delay = float(unicode(sell_delay)) or 1
for i, (t, row) in enumerate(events.iterrows()):
for sym, event in row.to_dict().iteritems():
# print sym, event, type(event)
# return events
if event and not np.isnan(event):
# add a sell event `sell_delay` in the future within the existing `events` DataFrame
# modify the series, but only in the future and be careful not to step on existing events
if event > 0:
sell_event_i = min(i + sell_delay, len(events) - 1)
sell_event_t = events.index[sell_event_i]
sell_event = events[sym][sell_event_i]
if np.isnan(sell_event):
events[sym][sell_event_t] = -1
else:
events[sym][sell_event_t] += -1
order = (t.year, t.month, t.day, sym, 'Buy' if event > 0 else 'Sell', abs(event) * 100)
if isinstance(sep, basestring):
yield sep.join(order)
yield order
|
python
|
def generate_orders(events, sell_delay=5, sep=','):
"""Generate CSV orders based on events indicated in a DataFrame
Arguments:
events (pandas.DataFrame): Table of NaNs or 1's, one column for each symbol.
1 indicates a BUY event. -1 a SELL event. nan or 0 is a nonevent.
sell_delay (float): Number of days to wait before selling back the shares bought
sep (str or None): if sep is None, orders will be returns as tuples of `int`s, `float`s, and `str`s
otherwise the separator will be used to join the order parameters into the yielded str
Returns:
generator of str: yielded CSV rows in the format (yr, mo, day, symbol, Buy/Sell, shares)
"""
sell_delay = float(unicode(sell_delay)) or 1
for i, (t, row) in enumerate(events.iterrows()):
for sym, event in row.to_dict().iteritems():
# print sym, event, type(event)
# return events
if event and not np.isnan(event):
# add a sell event `sell_delay` in the future within the existing `events` DataFrame
# modify the series, but only in the future and be careful not to step on existing events
if event > 0:
sell_event_i = min(i + sell_delay, len(events) - 1)
sell_event_t = events.index[sell_event_i]
sell_event = events[sym][sell_event_i]
if np.isnan(sell_event):
events[sym][sell_event_t] = -1
else:
events[sym][sell_event_t] += -1
order = (t.year, t.month, t.day, sym, 'Buy' if event > 0 else 'Sell', abs(event) * 100)
if isinstance(sep, basestring):
yield sep.join(order)
yield order
|
[
"def",
"generate_orders",
"(",
"events",
",",
"sell_delay",
"=",
"5",
",",
"sep",
"=",
"','",
")",
":",
"sell_delay",
"=",
"float",
"(",
"unicode",
"(",
"sell_delay",
")",
")",
"or",
"1",
"for",
"i",
",",
"(",
"t",
",",
"row",
")",
"in",
"enumerate",
"(",
"events",
".",
"iterrows",
"(",
")",
")",
":",
"for",
"sym",
",",
"event",
"in",
"row",
".",
"to_dict",
"(",
")",
".",
"iteritems",
"(",
")",
":",
"# print sym, event, type(event)",
"# return events",
"if",
"event",
"and",
"not",
"np",
".",
"isnan",
"(",
"event",
")",
":",
"# add a sell event `sell_delay` in the future within the existing `events` DataFrame",
"# modify the series, but only in the future and be careful not to step on existing events",
"if",
"event",
">",
"0",
":",
"sell_event_i",
"=",
"min",
"(",
"i",
"+",
"sell_delay",
",",
"len",
"(",
"events",
")",
"-",
"1",
")",
"sell_event_t",
"=",
"events",
".",
"index",
"[",
"sell_event_i",
"]",
"sell_event",
"=",
"events",
"[",
"sym",
"]",
"[",
"sell_event_i",
"]",
"if",
"np",
".",
"isnan",
"(",
"sell_event",
")",
":",
"events",
"[",
"sym",
"]",
"[",
"sell_event_t",
"]",
"=",
"-",
"1",
"else",
":",
"events",
"[",
"sym",
"]",
"[",
"sell_event_t",
"]",
"+=",
"-",
"1",
"order",
"=",
"(",
"t",
".",
"year",
",",
"t",
".",
"month",
",",
"t",
".",
"day",
",",
"sym",
",",
"'Buy'",
"if",
"event",
">",
"0",
"else",
"'Sell'",
",",
"abs",
"(",
"event",
")",
"*",
"100",
")",
"if",
"isinstance",
"(",
"sep",
",",
"basestring",
")",
":",
"yield",
"sep",
".",
"join",
"(",
"order",
")",
"yield",
"order"
] |
Generate CSV orders based on events indicated in a DataFrame
Arguments:
events (pandas.DataFrame): Table of NaNs or 1's, one column for each symbol.
1 indicates a BUY event. -1 a SELL event. nan or 0 is a nonevent.
sell_delay (float): Number of days to wait before selling back the shares bought
sep (str or None): if sep is None, orders will be returns as tuples of `int`s, `float`s, and `str`s
otherwise the separator will be used to join the order parameters into the yielded str
Returns:
generator of str: yielded CSV rows in the format (yr, mo, day, symbol, Buy/Sell, shares)
|
[
"Generate",
"CSV",
"orders",
"based",
"on",
"events",
"indicated",
"in",
"a",
"DataFrame"
] |
836911258a0e920083a88c91beae88eefdebb20c
|
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L576-L608
|
242,413
|
BlueHack-Core/blueforge
|
blueforge/util/trans.py
|
download_file
|
def download_file(save_path, file_url):
""" Download file from http url link """
r = requests.get(file_url) # create HTTP response object
with open(save_path, 'wb') as f:
f.write(r.content)
return save_path
|
python
|
def download_file(save_path, file_url):
""" Download file from http url link """
r = requests.get(file_url) # create HTTP response object
with open(save_path, 'wb') as f:
f.write(r.content)
return save_path
|
[
"def",
"download_file",
"(",
"save_path",
",",
"file_url",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"file_url",
")",
"# create HTTP response object",
"with",
"open",
"(",
"save_path",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"r",
".",
"content",
")",
"return",
"save_path"
] |
Download file from http url link
|
[
"Download",
"file",
"from",
"http",
"url",
"link"
] |
ac40a888ee9c388638a8f312c51f7500b8891b6c
|
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/trans.py#L6-L14
|
242,414
|
BlueHack-Core/blueforge
|
blueforge/util/trans.py
|
make_url
|
def make_url(domain, location):
""" This function helps to make full url path."""
url = urlparse(location)
if url.scheme == '' and url.netloc == '':
return domain + url.path
elif url.scheme == '':
return 'http://' + url.netloc + url.path
else:
return url.geturl()
|
python
|
def make_url(domain, location):
""" This function helps to make full url path."""
url = urlparse(location)
if url.scheme == '' and url.netloc == '':
return domain + url.path
elif url.scheme == '':
return 'http://' + url.netloc + url.path
else:
return url.geturl()
|
[
"def",
"make_url",
"(",
"domain",
",",
"location",
")",
":",
"url",
"=",
"urlparse",
"(",
"location",
")",
"if",
"url",
".",
"scheme",
"==",
"''",
"and",
"url",
".",
"netloc",
"==",
"''",
":",
"return",
"domain",
"+",
"url",
".",
"path",
"elif",
"url",
".",
"scheme",
"==",
"''",
":",
"return",
"'http://'",
"+",
"url",
".",
"netloc",
"+",
"url",
".",
"path",
"else",
":",
"return",
"url",
".",
"geturl",
"(",
")"
] |
This function helps to make full url path.
|
[
"This",
"function",
"helps",
"to",
"make",
"full",
"url",
"path",
"."
] |
ac40a888ee9c388638a8f312c51f7500b8891b6c
|
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/trans.py#L17-L27
|
242,415
|
Kopachris/seshet
|
seshet/utils.py
|
Storage.getlist
|
def getlist(self, key):
"""Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, [])
if value is None or isinstance(value, (list, tuple)):
return value
else:
return [value]
|
python
|
def getlist(self, key):
"""Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, [])
if value is None or isinstance(value, (list, tuple)):
return value
else:
return [value]
|
[
"def",
"getlist",
"(",
"self",
",",
"key",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"key",
",",
"[",
"]",
")",
"if",
"value",
"is",
"None",
"or",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"value",
"else",
":",
"return",
"[",
"value",
"]"
] |
Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
|
[
"Returns",
"a",
"Storage",
"value",
"as",
"a",
"list",
"."
] |
d55bae01cff56762c5467138474145a2c17d1932
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L94-L120
|
242,416
|
Kopachris/seshet
|
seshet/utils.py
|
Storage.getfirst
|
def getfirst(self, key, default=None):
"""Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
|
python
|
def getfirst(self, key, default=None):
"""Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
|
[
"def",
"getfirst",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"values",
"=",
"self",
".",
"getlist",
"(",
"key",
")",
"return",
"values",
"[",
"0",
"]",
"if",
"values",
"else",
"default"
] |
Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
|
[
"Returns",
"the",
"first",
"value",
"of",
"a",
"list",
"or",
"the",
"value",
"itself",
"when",
"given",
"a",
"request",
".",
"vars",
"style",
"key",
"."
] |
d55bae01cff56762c5467138474145a2c17d1932
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L122-L144
|
242,417
|
Kopachris/seshet
|
seshet/utils.py
|
Storage.getlast
|
def getlast(self, key, default=None):
"""Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
|
python
|
def getlast(self, key, default=None):
"""Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
|
[
"def",
"getlast",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"values",
"=",
"self",
".",
"getlist",
"(",
"key",
")",
"return",
"values",
"[",
"-",
"1",
"]",
"if",
"values",
"else",
"default"
] |
Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
|
[
"Returns",
"the",
"last",
"value",
"of",
"a",
"list",
"or",
"value",
"itself",
"when",
"given",
"a",
"request",
".",
"vars",
"style",
"key",
"."
] |
d55bae01cff56762c5467138474145a2c17d1932
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L146-L168
|
242,418
|
francois-vincent/clingon
|
clingon/clingon.py
|
Clizer._eval_variables
|
def _eval_variables(self):
"""evaluates callable _variables
"""
for k, v in listitems(self._variables):
self._variables[k] = v() if hasattr(v, '__call__') else v
|
python
|
def _eval_variables(self):
"""evaluates callable _variables
"""
for k, v in listitems(self._variables):
self._variables[k] = v() if hasattr(v, '__call__') else v
|
[
"def",
"_eval_variables",
"(",
"self",
")",
":",
"for",
"k",
",",
"v",
"in",
"listitems",
"(",
"self",
".",
"_variables",
")",
":",
"self",
".",
"_variables",
"[",
"k",
"]",
"=",
"v",
"(",
")",
"if",
"hasattr",
"(",
"v",
",",
"'__call__'",
")",
"else",
"v"
] |
evaluates callable _variables
|
[
"evaluates",
"callable",
"_variables"
] |
afc9db073dbc72b2562ce3e444152986a555dcbf
|
https://github.com/francois-vincent/clingon/blob/afc9db073dbc72b2562ce3e444152986a555dcbf/clingon/clingon.py#L217-L221
|
242,419
|
formwork-io/lazarus
|
lazarus/_util.py
|
defer
|
def defer(callable):
'''Defers execution of the callable to a thread.
For example:
>>> def foo():
... print('bar')
>>> join = defer(foo)
>>> join()
'''
t = threading.Thread(target=callable)
t.start()
return t.join
|
python
|
def defer(callable):
'''Defers execution of the callable to a thread.
For example:
>>> def foo():
... print('bar')
>>> join = defer(foo)
>>> join()
'''
t = threading.Thread(target=callable)
t.start()
return t.join
|
[
"def",
"defer",
"(",
"callable",
")",
":",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"callable",
")",
"t",
".",
"start",
"(",
")",
"return",
"t",
".",
"join"
] |
Defers execution of the callable to a thread.
For example:
>>> def foo():
... print('bar')
>>> join = defer(foo)
>>> join()
|
[
"Defers",
"execution",
"of",
"the",
"callable",
"to",
"a",
"thread",
"."
] |
b2b6120fe06d69c23b4f41d55b6d71860a9fdeaa
|
https://github.com/formwork-io/lazarus/blob/b2b6120fe06d69c23b4f41d55b6d71860a9fdeaa/lazarus/_util.py#L11-L23
|
242,420
|
formwork-io/lazarus
|
lazarus/_util.py
|
close_fds
|
def close_fds():
'''Close extraneous file descriptors.
On Linux, close everything but stdin, stdout, and stderr. On Mac, close
stdin, stdout, and stderr and everything owned by our user id.
'''
def close(fd):
with ignored(OSError):
os.close(fd)
if sys.platform == 'linux':
fd_dir = '/proc/self/fd'
fds = set(map(int, os.listdir(fd_dir)))
for x in (fds - {0, 1, 2}):
close(x)
elif sys.platform == 'darwin':
uid = os.getuid()
fd_dir = '/dev/fd'
fds = set(map(int, os.listdir(fd_dir)))
for x in (fds - {0, 1, 2}):
path = '/dev/fd/' + str(x)
if not os.access(path, os.R_OK):
continue
stat = os.fstat(x)
if stat.st_uid != uid:
continue
close(x)
|
python
|
def close_fds():
'''Close extraneous file descriptors.
On Linux, close everything but stdin, stdout, and stderr. On Mac, close
stdin, stdout, and stderr and everything owned by our user id.
'''
def close(fd):
with ignored(OSError):
os.close(fd)
if sys.platform == 'linux':
fd_dir = '/proc/self/fd'
fds = set(map(int, os.listdir(fd_dir)))
for x in (fds - {0, 1, 2}):
close(x)
elif sys.platform == 'darwin':
uid = os.getuid()
fd_dir = '/dev/fd'
fds = set(map(int, os.listdir(fd_dir)))
for x in (fds - {0, 1, 2}):
path = '/dev/fd/' + str(x)
if not os.access(path, os.R_OK):
continue
stat = os.fstat(x)
if stat.st_uid != uid:
continue
close(x)
|
[
"def",
"close_fds",
"(",
")",
":",
"def",
"close",
"(",
"fd",
")",
":",
"with",
"ignored",
"(",
"OSError",
")",
":",
"os",
".",
"close",
"(",
"fd",
")",
"if",
"sys",
".",
"platform",
"==",
"'linux'",
":",
"fd_dir",
"=",
"'/proc/self/fd'",
"fds",
"=",
"set",
"(",
"map",
"(",
"int",
",",
"os",
".",
"listdir",
"(",
"fd_dir",
")",
")",
")",
"for",
"x",
"in",
"(",
"fds",
"-",
"{",
"0",
",",
"1",
",",
"2",
"}",
")",
":",
"close",
"(",
"x",
")",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"uid",
"=",
"os",
".",
"getuid",
"(",
")",
"fd_dir",
"=",
"'/dev/fd'",
"fds",
"=",
"set",
"(",
"map",
"(",
"int",
",",
"os",
".",
"listdir",
"(",
"fd_dir",
")",
")",
")",
"for",
"x",
"in",
"(",
"fds",
"-",
"{",
"0",
",",
"1",
",",
"2",
"}",
")",
":",
"path",
"=",
"'/dev/fd/'",
"+",
"str",
"(",
"x",
")",
"if",
"not",
"os",
".",
"access",
"(",
"path",
",",
"os",
".",
"R_OK",
")",
":",
"continue",
"stat",
"=",
"os",
".",
"fstat",
"(",
"x",
")",
"if",
"stat",
".",
"st_uid",
"!=",
"uid",
":",
"continue",
"close",
"(",
"x",
")"
] |
Close extraneous file descriptors.
On Linux, close everything but stdin, stdout, and stderr. On Mac, close
stdin, stdout, and stderr and everything owned by our user id.
|
[
"Close",
"extraneous",
"file",
"descriptors",
"."
] |
b2b6120fe06d69c23b4f41d55b6d71860a9fdeaa
|
https://github.com/formwork-io/lazarus/blob/b2b6120fe06d69c23b4f41d55b6d71860a9fdeaa/lazarus/_util.py#L48-L75
|
242,421
|
HazardDede/dictmentor
|
dictmentor/base.py
|
DictMentor.bind
|
def bind(self, extension: Extension) -> 'DictMentor':
"""
Add any predefined or custom extension.
Args:
extension: Extension to add to the processor.
Returns:
The DictMentor itself for chaining.
"""
if not Extension.is_valid_extension(extension):
raise ValueError("Cannot bind extension due to missing interface requirements")
self._extensions.append(extension)
return self
|
python
|
def bind(self, extension: Extension) -> 'DictMentor':
"""
Add any predefined or custom extension.
Args:
extension: Extension to add to the processor.
Returns:
The DictMentor itself for chaining.
"""
if not Extension.is_valid_extension(extension):
raise ValueError("Cannot bind extension due to missing interface requirements")
self._extensions.append(extension)
return self
|
[
"def",
"bind",
"(",
"self",
",",
"extension",
":",
"Extension",
")",
"->",
"'DictMentor'",
":",
"if",
"not",
"Extension",
".",
"is_valid_extension",
"(",
"extension",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot bind extension due to missing interface requirements\"",
")",
"self",
".",
"_extensions",
".",
"append",
"(",
"extension",
")",
"return",
"self"
] |
Add any predefined or custom extension.
Args:
extension: Extension to add to the processor.
Returns:
The DictMentor itself for chaining.
|
[
"Add",
"any",
"predefined",
"or",
"custom",
"extension",
"."
] |
f50ca26ed04f7a924cde6e4d464c4f6ccba4e320
|
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/base.py#L31-L45
|
242,422
|
HazardDede/dictmentor
|
dictmentor/base.py
|
DictMentor.augment
|
def augment(self, dct: NonAugmentedDict,
document: Optional[YamlDocument] = None) -> AugmentedDict:
"""
Augments the given dictionary by using all the bound extensions.
Args:
dct: Dictionary to augment.
document: The document the dictionary was loaded from.
Returns:
The augmented dictionary.
"""
Validator.instance_of(dict, raise_ex=True, dct=dct)
# Apply any configured loader
for instance in self._extensions:
nodes = list(dict_find_pattern(dct, **instance.config()))
for parent, k, val in nodes:
parent.pop(k)
fragment = instance.apply(
ExtensionContext(
mentor=self,
document=document or dct,
dct=dct,
parent_node=parent,
node=(k, val)
)
)
if fragment is not None:
parent.update(fragment)
return dct
|
python
|
def augment(self, dct: NonAugmentedDict,
document: Optional[YamlDocument] = None) -> AugmentedDict:
"""
Augments the given dictionary by using all the bound extensions.
Args:
dct: Dictionary to augment.
document: The document the dictionary was loaded from.
Returns:
The augmented dictionary.
"""
Validator.instance_of(dict, raise_ex=True, dct=dct)
# Apply any configured loader
for instance in self._extensions:
nodes = list(dict_find_pattern(dct, **instance.config()))
for parent, k, val in nodes:
parent.pop(k)
fragment = instance.apply(
ExtensionContext(
mentor=self,
document=document or dct,
dct=dct,
parent_node=parent,
node=(k, val)
)
)
if fragment is not None:
parent.update(fragment)
return dct
|
[
"def",
"augment",
"(",
"self",
",",
"dct",
":",
"NonAugmentedDict",
",",
"document",
":",
"Optional",
"[",
"YamlDocument",
"]",
"=",
"None",
")",
"->",
"AugmentedDict",
":",
"Validator",
".",
"instance_of",
"(",
"dict",
",",
"raise_ex",
"=",
"True",
",",
"dct",
"=",
"dct",
")",
"# Apply any configured loader",
"for",
"instance",
"in",
"self",
".",
"_extensions",
":",
"nodes",
"=",
"list",
"(",
"dict_find_pattern",
"(",
"dct",
",",
"*",
"*",
"instance",
".",
"config",
"(",
")",
")",
")",
"for",
"parent",
",",
"k",
",",
"val",
"in",
"nodes",
":",
"parent",
".",
"pop",
"(",
"k",
")",
"fragment",
"=",
"instance",
".",
"apply",
"(",
"ExtensionContext",
"(",
"mentor",
"=",
"self",
",",
"document",
"=",
"document",
"or",
"dct",
",",
"dct",
"=",
"dct",
",",
"parent_node",
"=",
"parent",
",",
"node",
"=",
"(",
"k",
",",
"val",
")",
")",
")",
"if",
"fragment",
"is",
"not",
"None",
":",
"parent",
".",
"update",
"(",
"fragment",
")",
"return",
"dct"
] |
Augments the given dictionary by using all the bound extensions.
Args:
dct: Dictionary to augment.
document: The document the dictionary was loaded from.
Returns:
The augmented dictionary.
|
[
"Augments",
"the",
"given",
"dictionary",
"by",
"using",
"all",
"the",
"bound",
"extensions",
"."
] |
f50ca26ed04f7a924cde6e4d464c4f6ccba4e320
|
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/base.py#L47-L78
|
242,423
|
fmenabe/python-clif
|
examples/backup/lib/types.py
|
Date
|
def Date(value):
"""Custom type for managing dates in the command-line."""
from datetime import datetime
try:
return datetime(*reversed([int(val) for val in value.split('/')]))
except Exception as err:
raise argparse.ArgumentTypeError("invalid date '%s'" % value)
|
python
|
def Date(value):
"""Custom type for managing dates in the command-line."""
from datetime import datetime
try:
return datetime(*reversed([int(val) for val in value.split('/')]))
except Exception as err:
raise argparse.ArgumentTypeError("invalid date '%s'" % value)
|
[
"def",
"Date",
"(",
"value",
")",
":",
"from",
"datetime",
"import",
"datetime",
"try",
":",
"return",
"datetime",
"(",
"*",
"reversed",
"(",
"[",
"int",
"(",
"val",
")",
"for",
"val",
"in",
"value",
".",
"split",
"(",
"'/'",
")",
"]",
")",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"invalid date '%s'\"",
"%",
"value",
")"
] |
Custom type for managing dates in the command-line.
|
[
"Custom",
"type",
"for",
"managing",
"dates",
"in",
"the",
"command",
"-",
"line",
"."
] |
7d74b344337567f7951046406510a980afd7ee97
|
https://github.com/fmenabe/python-clif/blob/7d74b344337567f7951046406510a980afd7ee97/examples/backup/lib/types.py#L11-L17
|
242,424
|
the01/python-paps
|
examples/measure/echo_client.py
|
create
|
def create(clients_num, clients_host, clients_port, people_num, throttle):
"""
Prepare clients to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperClient], (str, object) -> str | None
"""
res = []
for number in range(clients_num):
sc = EchoClient({
'id': number,
'listen_bind_ip': clients_host,
#'multicast_bind_ip': "127.0.0.1",
'listen_port': clients_port + number
})
people = []
for person_number in range(people_num):
people.append(Person(id=person_number))
wrapper = WrapperEchoClient({
'client': sc,
'people': people,
'throttle': throttle
})
res.append(wrapper)
return res, cmd_line
|
python
|
def create(clients_num, clients_host, clients_port, people_num, throttle):
"""
Prepare clients to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperClient], (str, object) -> str | None
"""
res = []
for number in range(clients_num):
sc = EchoClient({
'id': number,
'listen_bind_ip': clients_host,
#'multicast_bind_ip': "127.0.0.1",
'listen_port': clients_port + number
})
people = []
for person_number in range(people_num):
people.append(Person(id=person_number))
wrapper = WrapperEchoClient({
'client': sc,
'people': people,
'throttle': throttle
})
res.append(wrapper)
return res, cmd_line
|
[
"def",
"create",
"(",
"clients_num",
",",
"clients_host",
",",
"clients_port",
",",
"people_num",
",",
"throttle",
")",
":",
"res",
"=",
"[",
"]",
"for",
"number",
"in",
"range",
"(",
"clients_num",
")",
":",
"sc",
"=",
"EchoClient",
"(",
"{",
"'id'",
":",
"number",
",",
"'listen_bind_ip'",
":",
"clients_host",
",",
"#'multicast_bind_ip': \"127.0.0.1\",",
"'listen_port'",
":",
"clients_port",
"+",
"number",
"}",
")",
"people",
"=",
"[",
"]",
"for",
"person_number",
"in",
"range",
"(",
"people_num",
")",
":",
"people",
".",
"append",
"(",
"Person",
"(",
"id",
"=",
"person_number",
")",
")",
"wrapper",
"=",
"WrapperEchoClient",
"(",
"{",
"'client'",
":",
"sc",
",",
"'people'",
":",
"people",
",",
"'throttle'",
":",
"throttle",
"}",
")",
"res",
".",
"append",
"(",
"wrapper",
")",
"return",
"res",
",",
"cmd_line"
] |
Prepare clients to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperClient], (str, object) -> str | None
|
[
"Prepare",
"clients",
"to",
"execute"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/examples/measure/echo_client.py#L114-L139
|
242,425
|
the01/python-paps
|
examples/measure/echo_client.py
|
EchoClient._packet_loop
|
def _packet_loop(self):
"""
Packet processing loop
:rtype: None
"""
while self._is_running:
# Only wait if there are no more packets in the inbox
if self.inbox.empty() \
and not self.new_packet.wait(self._packet_timeout):
continue
ip, port, packet = self.inbox.get()
if self.inbox.empty():
self.new_packet.clear()
self.debug(u"{}".format(packet))
if packet.header.message_type == MsgType.CONFIG:
self._do_config_packet(packet, ip, port)
elif packet.header.message_type == MsgType.UPDATE:
self._do_update_packet(packet)
|
python
|
def _packet_loop(self):
"""
Packet processing loop
:rtype: None
"""
while self._is_running:
# Only wait if there are no more packets in the inbox
if self.inbox.empty() \
and not self.new_packet.wait(self._packet_timeout):
continue
ip, port, packet = self.inbox.get()
if self.inbox.empty():
self.new_packet.clear()
self.debug(u"{}".format(packet))
if packet.header.message_type == MsgType.CONFIG:
self._do_config_packet(packet, ip, port)
elif packet.header.message_type == MsgType.UPDATE:
self._do_update_packet(packet)
|
[
"def",
"_packet_loop",
"(",
"self",
")",
":",
"while",
"self",
".",
"_is_running",
":",
"# Only wait if there are no more packets in the inbox",
"if",
"self",
".",
"inbox",
".",
"empty",
"(",
")",
"and",
"not",
"self",
".",
"new_packet",
".",
"wait",
"(",
"self",
".",
"_packet_timeout",
")",
":",
"continue",
"ip",
",",
"port",
",",
"packet",
"=",
"self",
".",
"inbox",
".",
"get",
"(",
")",
"if",
"self",
".",
"inbox",
".",
"empty",
"(",
")",
":",
"self",
".",
"new_packet",
".",
"clear",
"(",
")",
"self",
".",
"debug",
"(",
"u\"{}\"",
".",
"format",
"(",
"packet",
")",
")",
"if",
"packet",
".",
"header",
".",
"message_type",
"==",
"MsgType",
".",
"CONFIG",
":",
"self",
".",
"_do_config_packet",
"(",
"packet",
",",
"ip",
",",
"port",
")",
"elif",
"packet",
".",
"header",
".",
"message_type",
"==",
"MsgType",
".",
"UPDATE",
":",
"self",
".",
"_do_update_packet",
"(",
"packet",
")"
] |
Packet processing loop
:rtype: None
|
[
"Packet",
"processing",
"loop"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/examples/measure/echo_client.py#L34-L53
|
242,426
|
maxweisspoker/simplebitcoinfuncs
|
simplebitcoinfuncs/miscfuncs.py
|
dechex
|
def dechex(num,zfill=0):
'''
Simple integer to hex converter.
The zfill is the number of bytes, even though the input is a hex
string, which means that the actual zfill is 2x what you might
initially think it would be.
For example:
>>> dechex(4,2)
'0004'
'''
if not isitint(num):
raise TypeError("Input must be integer/long.")
o = hex(num).lstrip("0x").rstrip("L")
if o == "" or o == "0":
o = '00'
try:
o = unhexlify(o)
except:
o = unhexlify("0"+o)
if o == b'\x00' or o == 0:
o = '00'
else:
o = hexstrlify(o)
for i in range((2*zfill)-len(o)):
o = "0" + o
if len(o) % 2:
o = "0" + o
return str(o)
|
python
|
def dechex(num,zfill=0):
'''
Simple integer to hex converter.
The zfill is the number of bytes, even though the input is a hex
string, which means that the actual zfill is 2x what you might
initially think it would be.
For example:
>>> dechex(4,2)
'0004'
'''
if not isitint(num):
raise TypeError("Input must be integer/long.")
o = hex(num).lstrip("0x").rstrip("L")
if o == "" or o == "0":
o = '00'
try:
o = unhexlify(o)
except:
o = unhexlify("0"+o)
if o == b'\x00' or o == 0:
o = '00'
else:
o = hexstrlify(o)
for i in range((2*zfill)-len(o)):
o = "0" + o
if len(o) % 2:
o = "0" + o
return str(o)
|
[
"def",
"dechex",
"(",
"num",
",",
"zfill",
"=",
"0",
")",
":",
"if",
"not",
"isitint",
"(",
"num",
")",
":",
"raise",
"TypeError",
"(",
"\"Input must be integer/long.\"",
")",
"o",
"=",
"hex",
"(",
"num",
")",
".",
"lstrip",
"(",
"\"0x\"",
")",
".",
"rstrip",
"(",
"\"L\"",
")",
"if",
"o",
"==",
"\"\"",
"or",
"o",
"==",
"\"0\"",
":",
"o",
"=",
"'00'",
"try",
":",
"o",
"=",
"unhexlify",
"(",
"o",
")",
"except",
":",
"o",
"=",
"unhexlify",
"(",
"\"0\"",
"+",
"o",
")",
"if",
"o",
"==",
"b'\\x00'",
"or",
"o",
"==",
"0",
":",
"o",
"=",
"'00'",
"else",
":",
"o",
"=",
"hexstrlify",
"(",
"o",
")",
"for",
"i",
"in",
"range",
"(",
"(",
"2",
"*",
"zfill",
")",
"-",
"len",
"(",
"o",
")",
")",
":",
"o",
"=",
"\"0\"",
"+",
"o",
"if",
"len",
"(",
"o",
")",
"%",
"2",
":",
"o",
"=",
"\"0\"",
"+",
"o",
"return",
"str",
"(",
"o",
")"
] |
Simple integer to hex converter.
The zfill is the number of bytes, even though the input is a hex
string, which means that the actual zfill is 2x what you might
initially think it would be.
For example:
>>> dechex(4,2)
'0004'
|
[
"Simple",
"integer",
"to",
"hex",
"converter",
"."
] |
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
|
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/miscfuncs.py#L106-L136
|
242,427
|
selenol/selenol-python
|
selenol_python/params.py
|
selenol_params
|
def selenol_params(**kwargs):
"""Decorate request parameters to transform them into Selenol objects."""
def params_decorator(func):
"""Param decorator.
:param f: Function to decorate, typically on_request.
"""
def service_function_wrapper(service, message):
"""Wrap function call.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
params = {k: f(service, message) for k, f in kwargs.items()}
return func(service, **params)
return service_function_wrapper
return params_decorator
|
python
|
def selenol_params(**kwargs):
"""Decorate request parameters to transform them into Selenol objects."""
def params_decorator(func):
"""Param decorator.
:param f: Function to decorate, typically on_request.
"""
def service_function_wrapper(service, message):
"""Wrap function call.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
params = {k: f(service, message) for k, f in kwargs.items()}
return func(service, **params)
return service_function_wrapper
return params_decorator
|
[
"def",
"selenol_params",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"params_decorator",
"(",
"func",
")",
":",
"\"\"\"Param decorator.\n\n :param f: Function to decorate, typically on_request.\n \"\"\"",
"def",
"service_function_wrapper",
"(",
"service",
",",
"message",
")",
":",
"\"\"\"Wrap function call.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"",
"params",
"=",
"{",
"k",
":",
"f",
"(",
"service",
",",
"message",
")",
"for",
"k",
",",
"f",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"func",
"(",
"service",
",",
"*",
"*",
"params",
")",
"return",
"service_function_wrapper",
"return",
"params_decorator"
] |
Decorate request parameters to transform them into Selenol objects.
|
[
"Decorate",
"request",
"parameters",
"to",
"transform",
"them",
"into",
"Selenol",
"objects",
"."
] |
53775fdfc95161f4aca350305cb3459e6f2f808d
|
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/params.py#L21-L37
|
242,428
|
selenol/selenol-python
|
selenol_python/params.py
|
_get_value
|
def _get_value(data_structure, key):
"""Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered.
"""
if len(key) == 0:
raise KeyError()
value = data_structure[key[0]]
if len(key) > 1:
return _get_value(value, key[1:])
return value
|
python
|
def _get_value(data_structure, key):
"""Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered.
"""
if len(key) == 0:
raise KeyError()
value = data_structure[key[0]]
if len(key) > 1:
return _get_value(value, key[1:])
return value
|
[
"def",
"_get_value",
"(",
"data_structure",
",",
"key",
")",
":",
"if",
"len",
"(",
"key",
")",
"==",
"0",
":",
"raise",
"KeyError",
"(",
")",
"value",
"=",
"data_structure",
"[",
"key",
"[",
"0",
"]",
"]",
"if",
"len",
"(",
"key",
")",
">",
"1",
":",
"return",
"_get_value",
"(",
"value",
",",
"key",
"[",
"1",
":",
"]",
")",
"return",
"value"
] |
Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered.
|
[
"Return",
"the",
"value",
"of",
"a",
"data_structure",
"given",
"a",
"path",
"."
] |
53775fdfc95161f4aca350305cb3459e6f2f808d
|
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/params.py#L40-L51
|
242,429
|
selenol/selenol-python
|
selenol_python/params.py
|
get_value_from_session
|
def get_value_from_session(key):
"""Get a session value from the path specifed.
:param key: Array that defines the path of the value inside the message.
"""
def value_from_session_function(service, message):
"""Actual implementation of get_value_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
return _get_value(message.session, key)
return value_from_session_function
|
python
|
def get_value_from_session(key):
"""Get a session value from the path specifed.
:param key: Array that defines the path of the value inside the message.
"""
def value_from_session_function(service, message):
"""Actual implementation of get_value_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
return _get_value(message.session, key)
return value_from_session_function
|
[
"def",
"get_value_from_session",
"(",
"key",
")",
":",
"def",
"value_from_session_function",
"(",
"service",
",",
"message",
")",
":",
"\"\"\"Actual implementation of get_value_from_session function.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"",
"return",
"_get_value",
"(",
"message",
".",
"session",
",",
"key",
")",
"return",
"value_from_session_function"
] |
Get a session value from the path specifed.
:param key: Array that defines the path of the value inside the message.
|
[
"Get",
"a",
"session",
"value",
"from",
"the",
"path",
"specifed",
"."
] |
53775fdfc95161f4aca350305cb3459e6f2f808d
|
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/params.py#L54-L66
|
242,430
|
selenol/selenol-python
|
selenol_python/params.py
|
get_value_from_content
|
def get_value_from_content(key):
"""Get a value from the path specifed.
:param key: Array that defines the path of the value inside the message.
"""
def value_from_content_function(service, message):
"""Actual implementation of get_value_from_content function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
return _get_value(message.content, key)
return value_from_content_function
|
python
|
def get_value_from_content(key):
"""Get a value from the path specifed.
:param key: Array that defines the path of the value inside the message.
"""
def value_from_content_function(service, message):
"""Actual implementation of get_value_from_content function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
return _get_value(message.content, key)
return value_from_content_function
|
[
"def",
"get_value_from_content",
"(",
"key",
")",
":",
"def",
"value_from_content_function",
"(",
"service",
",",
"message",
")",
":",
"\"\"\"Actual implementation of get_value_from_content function.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"",
"return",
"_get_value",
"(",
"message",
".",
"content",
",",
"key",
")",
"return",
"value_from_content_function"
] |
Get a value from the path specifed.
:param key: Array that defines the path of the value inside the message.
|
[
"Get",
"a",
"value",
"from",
"the",
"path",
"specifed",
"."
] |
53775fdfc95161f4aca350305cb3459e6f2f808d
|
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/params.py#L69-L81
|
242,431
|
selenol/selenol-python
|
selenol_python/params.py
|
get_object_from_content
|
def get_object_from_content(entity, key):
"""Get an object from the database given an entity and the content key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_content_function(service, message):
"""Actual implementation of get_object_from_content function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_content(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_)
return result
return object_from_content_function
|
python
|
def get_object_from_content(entity, key):
"""Get an object from the database given an entity and the content key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_content_function(service, message):
"""Actual implementation of get_object_from_content function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_content(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_)
return result
return object_from_content_function
|
[
"def",
"get_object_from_content",
"(",
"entity",
",",
"key",
")",
":",
"def",
"object_from_content_function",
"(",
"service",
",",
"message",
")",
":",
"\"\"\"Actual implementation of get_object_from_content function.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"",
"id_",
"=",
"get_value_from_content",
"(",
"key",
")",
"(",
"service",
",",
"message",
")",
"result",
"=",
"service",
".",
"session",
".",
"query",
"(",
"entity",
")",
".",
"get",
"(",
"id_",
")",
"if",
"not",
"result",
":",
"raise",
"SelenolInvalidArgumentException",
"(",
"key",
",",
"id_",
")",
"return",
"result",
"return",
"object_from_content_function"
] |
Get an object from the database given an entity and the content key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
|
[
"Get",
"an",
"object",
"from",
"the",
"database",
"given",
"an",
"entity",
"and",
"the",
"content",
"key",
"."
] |
53775fdfc95161f4aca350305cb3459e6f2f808d
|
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/params.py#L84-L101
|
242,432
|
selenol/selenol-python
|
selenol_python/params.py
|
get_object_from_session
|
def get_object_from_session(entity, key):
"""Get an object from the database given an entity and the session key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_session_function(service, message):
"""Actual implementation of get_object_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_session(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_)
return result
return object_from_session_function
|
python
|
def get_object_from_session(entity, key):
"""Get an object from the database given an entity and the session key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_session_function(service, message):
"""Actual implementation of get_object_from_session function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_session(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_)
return result
return object_from_session_function
|
[
"def",
"get_object_from_session",
"(",
"entity",
",",
"key",
")",
":",
"def",
"object_from_session_function",
"(",
"service",
",",
"message",
")",
":",
"\"\"\"Actual implementation of get_object_from_session function.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"",
"id_",
"=",
"get_value_from_session",
"(",
"key",
")",
"(",
"service",
",",
"message",
")",
"result",
"=",
"service",
".",
"session",
".",
"query",
"(",
"entity",
")",
".",
"get",
"(",
"id_",
")",
"if",
"not",
"result",
":",
"raise",
"SelenolInvalidArgumentException",
"(",
"key",
",",
"id_",
")",
"return",
"result",
"return",
"object_from_session_function"
] |
Get an object from the database given an entity and the session key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
|
[
"Get",
"an",
"object",
"from",
"the",
"database",
"given",
"an",
"entity",
"and",
"the",
"session",
"key",
"."
] |
53775fdfc95161f4aca350305cb3459e6f2f808d
|
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/params.py#L104-L121
|
242,433
|
uw-it-aca/uw-restclients-grad
|
uw_grad/petition.py
|
_process_json
|
def _process_json(data):
"""
return a list of GradPetition objects.
"""
requests = []
for item in data:
petition = GradPetition()
petition.description = item.get('description')
petition.submit_date = parse_datetime(item.get('submitDate'))
petition.decision_date = parse_datetime(item.get('decisionDate'))
if item.get('deptRecommend') and len(item.get('deptRecommend')):
petition.dept_recommend = item.get('deptRecommend').lower()
if item.get('gradSchoolDecision') and\
len(item.get('gradSchoolDecision')):
petition.gradschool_decision =\
item.get('gradSchoolDecision').lower()
requests.append(petition)
return requests
|
python
|
def _process_json(data):
"""
return a list of GradPetition objects.
"""
requests = []
for item in data:
petition = GradPetition()
petition.description = item.get('description')
petition.submit_date = parse_datetime(item.get('submitDate'))
petition.decision_date = parse_datetime(item.get('decisionDate'))
if item.get('deptRecommend') and len(item.get('deptRecommend')):
petition.dept_recommend = item.get('deptRecommend').lower()
if item.get('gradSchoolDecision') and\
len(item.get('gradSchoolDecision')):
petition.gradschool_decision =\
item.get('gradSchoolDecision').lower()
requests.append(petition)
return requests
|
[
"def",
"_process_json",
"(",
"data",
")",
":",
"requests",
"=",
"[",
"]",
"for",
"item",
"in",
"data",
":",
"petition",
"=",
"GradPetition",
"(",
")",
"petition",
".",
"description",
"=",
"item",
".",
"get",
"(",
"'description'",
")",
"petition",
".",
"submit_date",
"=",
"parse_datetime",
"(",
"item",
".",
"get",
"(",
"'submitDate'",
")",
")",
"petition",
".",
"decision_date",
"=",
"parse_datetime",
"(",
"item",
".",
"get",
"(",
"'decisionDate'",
")",
")",
"if",
"item",
".",
"get",
"(",
"'deptRecommend'",
")",
"and",
"len",
"(",
"item",
".",
"get",
"(",
"'deptRecommend'",
")",
")",
":",
"petition",
".",
"dept_recommend",
"=",
"item",
".",
"get",
"(",
"'deptRecommend'",
")",
".",
"lower",
"(",
")",
"if",
"item",
".",
"get",
"(",
"'gradSchoolDecision'",
")",
"and",
"len",
"(",
"item",
".",
"get",
"(",
"'gradSchoolDecision'",
")",
")",
":",
"petition",
".",
"gradschool_decision",
"=",
"item",
".",
"get",
"(",
"'gradSchoolDecision'",
")",
".",
"lower",
"(",
")",
"requests",
".",
"append",
"(",
"petition",
")",
"return",
"requests"
] |
return a list of GradPetition objects.
|
[
"return",
"a",
"list",
"of",
"GradPetition",
"objects",
"."
] |
ca06ed2f24f3683314a5690f6078e97d37fc8e52
|
https://github.com/uw-it-aca/uw-restclients-grad/blob/ca06ed2f24f3683314a5690f6078e97d37fc8e52/uw_grad/petition.py#L20-L39
|
242,434
|
jhazelwo/python-fileasobj
|
docs/examples.py
|
example_add_line_to_file
|
def example_add_line_to_file():
""" Different methods to append a given line to the file, all work the same. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.add('foo')
my_file.append('bar')
# Add a new line to my_file that contains the word 'lol' and print True|False if my_file was changed.
print(my_file + 'lol')
# Add line even if it already exists in the file.
my_file.unique = False
my_file.add('foo')
|
python
|
def example_add_line_to_file():
""" Different methods to append a given line to the file, all work the same. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.add('foo')
my_file.append('bar')
# Add a new line to my_file that contains the word 'lol' and print True|False if my_file was changed.
print(my_file + 'lol')
# Add line even if it already exists in the file.
my_file.unique = False
my_file.add('foo')
|
[
"def",
"example_add_line_to_file",
"(",
")",
":",
"my_file",
"=",
"FileAsObj",
"(",
"'/tmp/example_file.txt'",
")",
"my_file",
".",
"add",
"(",
"'foo'",
")",
"my_file",
".",
"append",
"(",
"'bar'",
")",
"# Add a new line to my_file that contains the word 'lol' and print True|False if my_file was changed.",
"print",
"(",
"my_file",
"+",
"'lol'",
")",
"# Add line even if it already exists in the file.",
"my_file",
".",
"unique",
"=",
"False",
"my_file",
".",
"add",
"(",
"'foo'",
")"
] |
Different methods to append a given line to the file, all work the same.
|
[
"Different",
"methods",
"to",
"append",
"a",
"given",
"line",
"to",
"the",
"file",
"all",
"work",
"the",
"same",
"."
] |
4bdbb575e75da830b88d10d0c1020d787ceba44d
|
https://github.com/jhazelwo/python-fileasobj/blob/4bdbb575e75da830b88d10d0c1020d787ceba44d/docs/examples.py#L63-L72
|
242,435
|
jhazelwo/python-fileasobj
|
docs/examples.py
|
example_write_file_to_disk_if_changed
|
def example_write_file_to_disk_if_changed():
""" Try to remove all comments from a file, and save it if changes were made. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.rm(my_file.egrep('^#'))
if my_file.changed:
my_file.save()
|
python
|
def example_write_file_to_disk_if_changed():
""" Try to remove all comments from a file, and save it if changes were made. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.rm(my_file.egrep('^#'))
if my_file.changed:
my_file.save()
|
[
"def",
"example_write_file_to_disk_if_changed",
"(",
")",
":",
"my_file",
"=",
"FileAsObj",
"(",
"'/tmp/example_file.txt'",
")",
"my_file",
".",
"rm",
"(",
"my_file",
".",
"egrep",
"(",
"'^#'",
")",
")",
"if",
"my_file",
".",
"changed",
":",
"my_file",
".",
"save",
"(",
")"
] |
Try to remove all comments from a file, and save it if changes were made.
|
[
"Try",
"to",
"remove",
"all",
"comments",
"from",
"a",
"file",
"and",
"save",
"it",
"if",
"changes",
"were",
"made",
"."
] |
4bdbb575e75da830b88d10d0c1020d787ceba44d
|
https://github.com/jhazelwo/python-fileasobj/blob/4bdbb575e75da830b88d10d0c1020d787ceba44d/docs/examples.py#L132-L137
|
242,436
|
jhazelwo/python-fileasobj
|
docs/examples.py
|
example_all
|
def example_all():
"""
Use a bunch of methods on a file.
"""
my_file = FileAsObj()
my_file.filename = '/tmp/example_file.txt'
my_file.add('# First change!')
my_file.save()
my_file = FileAsObj('/tmp/example_file.txt')
my_file.unique = True
my_file.sorted = True
my_file.add('1')
my_file.add('1')
my_file.add('2')
my_file.add('20 foo')
my_file.add('200 bar')
my_file.add('# Comment')
my_file.unique = False
my_file.add('# Comment')
my_file.add('# Comment')
my_file.unique = True
my_file.rm(my_file.egrep('^#.*'))
my_file.rm(my_file.grep('foo'))
my_file.replace(my_file.egrep('^2'), 'This line was replaced.')
print(my_file)
print(my_file.log)
|
python
|
def example_all():
"""
Use a bunch of methods on a file.
"""
my_file = FileAsObj()
my_file.filename = '/tmp/example_file.txt'
my_file.add('# First change!')
my_file.save()
my_file = FileAsObj('/tmp/example_file.txt')
my_file.unique = True
my_file.sorted = True
my_file.add('1')
my_file.add('1')
my_file.add('2')
my_file.add('20 foo')
my_file.add('200 bar')
my_file.add('# Comment')
my_file.unique = False
my_file.add('# Comment')
my_file.add('# Comment')
my_file.unique = True
my_file.rm(my_file.egrep('^#.*'))
my_file.rm(my_file.grep('foo'))
my_file.replace(my_file.egrep('^2'), 'This line was replaced.')
print(my_file)
print(my_file.log)
|
[
"def",
"example_all",
"(",
")",
":",
"my_file",
"=",
"FileAsObj",
"(",
")",
"my_file",
".",
"filename",
"=",
"'/tmp/example_file.txt'",
"my_file",
".",
"add",
"(",
"'# First change!'",
")",
"my_file",
".",
"save",
"(",
")",
"my_file",
"=",
"FileAsObj",
"(",
"'/tmp/example_file.txt'",
")",
"my_file",
".",
"unique",
"=",
"True",
"my_file",
".",
"sorted",
"=",
"True",
"my_file",
".",
"add",
"(",
"'1'",
")",
"my_file",
".",
"add",
"(",
"'1'",
")",
"my_file",
".",
"add",
"(",
"'2'",
")",
"my_file",
".",
"add",
"(",
"'20 foo'",
")",
"my_file",
".",
"add",
"(",
"'200 bar'",
")",
"my_file",
".",
"add",
"(",
"'# Comment'",
")",
"my_file",
".",
"unique",
"=",
"False",
"my_file",
".",
"add",
"(",
"'# Comment'",
")",
"my_file",
".",
"add",
"(",
"'# Comment'",
")",
"my_file",
".",
"unique",
"=",
"True",
"my_file",
".",
"rm",
"(",
"my_file",
".",
"egrep",
"(",
"'^#.*'",
")",
")",
"my_file",
".",
"rm",
"(",
"my_file",
".",
"grep",
"(",
"'foo'",
")",
")",
"my_file",
".",
"replace",
"(",
"my_file",
".",
"egrep",
"(",
"'^2'",
")",
",",
"'This line was replaced.'",
")",
"print",
"(",
"my_file",
")",
"print",
"(",
"my_file",
".",
"log",
")"
] |
Use a bunch of methods on a file.
|
[
"Use",
"a",
"bunch",
"of",
"methods",
"on",
"a",
"file",
"."
] |
4bdbb575e75da830b88d10d0c1020d787ceba44d
|
https://github.com/jhazelwo/python-fileasobj/blob/4bdbb575e75da830b88d10d0c1020d787ceba44d/docs/examples.py#L173-L198
|
242,437
|
ryanjdillon/pylleo
|
pylleo/lleocal.py
|
get_cal_data
|
def get_cal_data(data_df, cal_dict, param):
'''Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields
'''
param = param.lower().replace(' ','_').replace('-','_')
idx_lower_start = cal_dict['parameters'][param]['lower']['start']
idx_lower_end = cal_dict['parameters'][param]['lower']['end']
idx_upper_start = cal_dict['parameters'][param]['upper']['start']
idx_upper_end = cal_dict['parameters'][param]['upper']['end']
idx_lower = (data_df.index >= idx_lower_start) & \
(data_df.index <= idx_lower_end)
idx_upper = (data_df.index >= idx_upper_start) & \
(data_df.index <= idx_upper_end)
return data_df[param][idx_lower], data_df[param][idx_upper]
|
python
|
def get_cal_data(data_df, cal_dict, param):
'''Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields
'''
param = param.lower().replace(' ','_').replace('-','_')
idx_lower_start = cal_dict['parameters'][param]['lower']['start']
idx_lower_end = cal_dict['parameters'][param]['lower']['end']
idx_upper_start = cal_dict['parameters'][param]['upper']['start']
idx_upper_end = cal_dict['parameters'][param]['upper']['end']
idx_lower = (data_df.index >= idx_lower_start) & \
(data_df.index <= idx_lower_end)
idx_upper = (data_df.index >= idx_upper_start) & \
(data_df.index <= idx_upper_end)
return data_df[param][idx_lower], data_df[param][idx_upper]
|
[
"def",
"get_cal_data",
"(",
"data_df",
",",
"cal_dict",
",",
"param",
")",
":",
"param",
"=",
"param",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"idx_lower_start",
"=",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"'lower'",
"]",
"[",
"'start'",
"]",
"idx_lower_end",
"=",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"'lower'",
"]",
"[",
"'end'",
"]",
"idx_upper_start",
"=",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"'upper'",
"]",
"[",
"'start'",
"]",
"idx_upper_end",
"=",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"'upper'",
"]",
"[",
"'end'",
"]",
"idx_lower",
"=",
"(",
"data_df",
".",
"index",
">=",
"idx_lower_start",
")",
"&",
"(",
"data_df",
".",
"index",
"<=",
"idx_lower_end",
")",
"idx_upper",
"=",
"(",
"data_df",
".",
"index",
">=",
"idx_upper_start",
")",
"&",
"(",
"data_df",
".",
"index",
"<=",
"idx_upper_end",
")",
"return",
"data_df",
"[",
"param",
"]",
"[",
"idx_lower",
"]",
",",
"data_df",
"[",
"param",
"]",
"[",
"idx_upper",
"]"
] |
Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields
|
[
"Get",
"data",
"along",
"specified",
"axis",
"during",
"calibration",
"intervals"
] |
b9b999fef19eaeccce4f207ab1b6198287c1bfec
|
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L2-L38
|
242,438
|
ryanjdillon/pylleo
|
pylleo/lleocal.py
|
read_cal
|
def read_cal(cal_yaml_path):
'''Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data
'''
from collections import OrderedDict
import datetime
import os
import warnings
import yamlord
from . import utils
def __create_cal(cal_yaml_path):
cal_dict = OrderedDict()
# Add experiment name for calibration reference
base_path, _ = os.path.split(cal_yaml_path)
_, exp_name = os.path.split(base_path)
cal_dict['experiment'] = exp_name
return cal_dict
# Try reading cal file, else create
if os.path.isfile(cal_yaml_path):
cal_dict = yamlord.read_yaml(cal_yaml_path)
else:
cal_dict = __create_cal(cal_yaml_path)
cal_dict['parameters'] = OrderedDict()
for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():
cal_dict[key] = val
fmt = "%Y-%m-%d %H:%M:%S"
cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)
return cal_dict
|
python
|
def read_cal(cal_yaml_path):
'''Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data
'''
from collections import OrderedDict
import datetime
import os
import warnings
import yamlord
from . import utils
def __create_cal(cal_yaml_path):
cal_dict = OrderedDict()
# Add experiment name for calibration reference
base_path, _ = os.path.split(cal_yaml_path)
_, exp_name = os.path.split(base_path)
cal_dict['experiment'] = exp_name
return cal_dict
# Try reading cal file, else create
if os.path.isfile(cal_yaml_path):
cal_dict = yamlord.read_yaml(cal_yaml_path)
else:
cal_dict = __create_cal(cal_yaml_path)
cal_dict['parameters'] = OrderedDict()
for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():
cal_dict[key] = val
fmt = "%Y-%m-%d %H:%M:%S"
cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)
return cal_dict
|
[
"def",
"read_cal",
"(",
"cal_yaml_path",
")",
":",
"from",
"collections",
"import",
"OrderedDict",
"import",
"datetime",
"import",
"os",
"import",
"warnings",
"import",
"yamlord",
"from",
".",
"import",
"utils",
"def",
"__create_cal",
"(",
"cal_yaml_path",
")",
":",
"cal_dict",
"=",
"OrderedDict",
"(",
")",
"# Add experiment name for calibration reference",
"base_path",
",",
"_",
"=",
"os",
".",
"path",
".",
"split",
"(",
"cal_yaml_path",
")",
"_",
",",
"exp_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"base_path",
")",
"cal_dict",
"[",
"'experiment'",
"]",
"=",
"exp_name",
"return",
"cal_dict",
"# Try reading cal file, else create",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cal_yaml_path",
")",
":",
"cal_dict",
"=",
"yamlord",
".",
"read_yaml",
"(",
"cal_yaml_path",
")",
"else",
":",
"cal_dict",
"=",
"__create_cal",
"(",
"cal_yaml_path",
")",
"cal_dict",
"[",
"'parameters'",
"]",
"=",
"OrderedDict",
"(",
")",
"for",
"key",
",",
"val",
"in",
"utils",
".",
"parse_experiment_params",
"(",
"cal_dict",
"[",
"'experiment'",
"]",
")",
".",
"items",
"(",
")",
":",
"cal_dict",
"[",
"key",
"]",
"=",
"val",
"fmt",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
"cal_dict",
"[",
"'date_modified'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"fmt",
")",
"return",
"cal_dict"
] |
Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data
|
[
"Load",
"calibration",
"file",
"if",
"exists",
"else",
"create"
] |
b9b999fef19eaeccce4f207ab1b6198287c1bfec
|
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L41-L85
|
242,439
|
ryanjdillon/pylleo
|
pylleo/lleocal.py
|
update
|
def update(data_df, cal_dict, param, bound, start, end):
'''Update calibration times for give parameter and boundary'''
from collections import OrderedDict
if param not in cal_dict['parameters']:
cal_dict['parameters'][param] = OrderedDict()
if bound not in cal_dict['parameters'][param]:
cal_dict['parameters'][param][bound] = OrderedDict()
cal_dict['parameters'][param][bound]['start'] = start
cal_dict['parameters'][param][bound]['end'] = end
return cal_dict
|
python
|
def update(data_df, cal_dict, param, bound, start, end):
'''Update calibration times for give parameter and boundary'''
from collections import OrderedDict
if param not in cal_dict['parameters']:
cal_dict['parameters'][param] = OrderedDict()
if bound not in cal_dict['parameters'][param]:
cal_dict['parameters'][param][bound] = OrderedDict()
cal_dict['parameters'][param][bound]['start'] = start
cal_dict['parameters'][param][bound]['end'] = end
return cal_dict
|
[
"def",
"update",
"(",
"data_df",
",",
"cal_dict",
",",
"param",
",",
"bound",
",",
"start",
",",
"end",
")",
":",
"from",
"collections",
"import",
"OrderedDict",
"if",
"param",
"not",
"in",
"cal_dict",
"[",
"'parameters'",
"]",
":",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"=",
"OrderedDict",
"(",
")",
"if",
"bound",
"not",
"in",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
":",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"bound",
"]",
"=",
"OrderedDict",
"(",
")",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"bound",
"]",
"[",
"'start'",
"]",
"=",
"start",
"cal_dict",
"[",
"'parameters'",
"]",
"[",
"param",
"]",
"[",
"bound",
"]",
"[",
"'end'",
"]",
"=",
"end",
"return",
"cal_dict"
] |
Update calibration times for give parameter and boundary
|
[
"Update",
"calibration",
"times",
"for",
"give",
"parameter",
"and",
"boundary"
] |
b9b999fef19eaeccce4f207ab1b6198287c1bfec
|
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L88-L100
|
242,440
|
ryanjdillon/pylleo
|
pylleo/lleocal.py
|
fit1d
|
def fit1d(lower, upper):
'''Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
'''
import numpy
# Get smallest size as index position for slicing
idx = min(len(lower), len(upper))
# Stack accelerometer count values for upper and lower bounds of curve
x = numpy.hstack((lower[:idx].values, upper[:idx].values))
x = x.astype(float)
# Make corresponding y array where all lower bound points equal -g
# and all upper bound points equal +g
y = numpy.zeros(len(x), dtype=float)
y[:idx] = -1.0 # negative gravity
y[idx:] = 1.0 # positive gravity
return numpy.polyfit(x, y, deg=1)
|
python
|
def fit1d(lower, upper):
'''Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
'''
import numpy
# Get smallest size as index position for slicing
idx = min(len(lower), len(upper))
# Stack accelerometer count values for upper and lower bounds of curve
x = numpy.hstack((lower[:idx].values, upper[:idx].values))
x = x.astype(float)
# Make corresponding y array where all lower bound points equal -g
# and all upper bound points equal +g
y = numpy.zeros(len(x), dtype=float)
y[:idx] = -1.0 # negative gravity
y[idx:] = 1.0 # positive gravity
return numpy.polyfit(x, y, deg=1)
|
[
"def",
"fit1d",
"(",
"lower",
",",
"upper",
")",
":",
"import",
"numpy",
"# Get smallest size as index position for slicing",
"idx",
"=",
"min",
"(",
"len",
"(",
"lower",
")",
",",
"len",
"(",
"upper",
")",
")",
"# Stack accelerometer count values for upper and lower bounds of curve",
"x",
"=",
"numpy",
".",
"hstack",
"(",
"(",
"lower",
"[",
":",
"idx",
"]",
".",
"values",
",",
"upper",
"[",
":",
"idx",
"]",
".",
"values",
")",
")",
"x",
"=",
"x",
".",
"astype",
"(",
"float",
")",
"# Make corresponding y array where all lower bound points equal -g",
"# and all upper bound points equal +g",
"y",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"x",
")",
",",
"dtype",
"=",
"float",
")",
"y",
"[",
":",
"idx",
"]",
"=",
"-",
"1.0",
"# negative gravity",
"y",
"[",
"idx",
":",
"]",
"=",
"1.0",
"# positive gravity",
"return",
"numpy",
".",
"polyfit",
"(",
"x",
",",
"y",
",",
"deg",
"=",
"1",
")"
] |
Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
|
[
"Fit",
"acceleration",
"data",
"at",
"lower",
"and",
"upper",
"boundaries",
"of",
"gravity"
] |
b9b999fef19eaeccce4f207ab1b6198287c1bfec
|
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L103-L143
|
242,441
|
robertchase/ergaleia
|
ergaleia/un_comment.py
|
un_comment
|
def un_comment(s, comment='#', strip=True):
"""Uncomment a string or list of strings
truncate s at first occurrence of a non-escaped comment character
remove escapes from escaped comment characters
Parameters:
s - string to uncomment
comment - comment character (default=#) (see Note 1)
strip - strip line after uncomment (default=True)
Notes:
1. Comment character can be escaped using \
2. If a tuple or list is provided, a list of the same length will
be returned, with each string in the list uncommented. Some
lines may be zero length.
"""
def _un_comment(string):
result = re.split(r'(?<!\\)' + comment, string, maxsplit=1)[0]
result = re.sub(r'\\' + comment, comment, result)
if strip:
return result.strip()
return result
if isinstance(s, (tuple, list)):
return [_un_comment(line) for line in s]
return _un_comment(s)
|
python
|
def un_comment(s, comment='#', strip=True):
"""Uncomment a string or list of strings
truncate s at first occurrence of a non-escaped comment character
remove escapes from escaped comment characters
Parameters:
s - string to uncomment
comment - comment character (default=#) (see Note 1)
strip - strip line after uncomment (default=True)
Notes:
1. Comment character can be escaped using \
2. If a tuple or list is provided, a list of the same length will
be returned, with each string in the list uncommented. Some
lines may be zero length.
"""
def _un_comment(string):
result = re.split(r'(?<!\\)' + comment, string, maxsplit=1)[0]
result = re.sub(r'\\' + comment, comment, result)
if strip:
return result.strip()
return result
if isinstance(s, (tuple, list)):
return [_un_comment(line) for line in s]
return _un_comment(s)
|
[
"def",
"un_comment",
"(",
"s",
",",
"comment",
"=",
"'#'",
",",
"strip",
"=",
"True",
")",
":",
"def",
"_un_comment",
"(",
"string",
")",
":",
"result",
"=",
"re",
".",
"split",
"(",
"r'(?<!\\\\)'",
"+",
"comment",
",",
"string",
",",
"maxsplit",
"=",
"1",
")",
"[",
"0",
"]",
"result",
"=",
"re",
".",
"sub",
"(",
"r'\\\\'",
"+",
"comment",
",",
"comment",
",",
"result",
")",
"if",
"strip",
":",
"return",
"result",
".",
"strip",
"(",
")",
"return",
"result",
"if",
"isinstance",
"(",
"s",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"[",
"_un_comment",
"(",
"line",
")",
"for",
"line",
"in",
"s",
"]",
"return",
"_un_comment",
"(",
"s",
")"
] |
Uncomment a string or list of strings
truncate s at first occurrence of a non-escaped comment character
remove escapes from escaped comment characters
Parameters:
s - string to uncomment
comment - comment character (default=#) (see Note 1)
strip - strip line after uncomment (default=True)
Notes:
1. Comment character can be escaped using \
2. If a tuple or list is provided, a list of the same length will
be returned, with each string in the list uncommented. Some
lines may be zero length.
|
[
"Uncomment",
"a",
"string",
"or",
"list",
"of",
"strings"
] |
df8e9a4b18c563022a503faa27e822c9a5755490
|
https://github.com/robertchase/ergaleia/blob/df8e9a4b18c563022a503faa27e822c9a5755490/ergaleia/un_comment.py#L9-L36
|
242,442
|
shaypal5/comath
|
comath/func/func.py
|
get_smooth_step_function
|
def get_smooth_step_function(min_val, max_val, switch_point, smooth_factor):
"""Returns a function that moves smoothly between a minimal value and a
maximal one when its value increases from a given witch point to infinity.
Arguments
---------
min_val: float
max_val value the function will return when x=switch_point.
min_val: float
The value the function will converge to when x -> infinity.
switch_point: float
The point in which the function's value will become min_val. Smaller
x values will return values smaller than min_val.
smooth_factor: float
The bigger the smoother, and less cliff-like, is the function.
Returns
-------
function
The desired smooth function.
"""
dif = max_val - min_val
def _smooth_step(x):
return min_val + dif * tanh((x - switch_point) / smooth_factor)
return _smooth_step
|
python
|
def get_smooth_step_function(min_val, max_val, switch_point, smooth_factor):
"""Returns a function that moves smoothly between a minimal value and a
maximal one when its value increases from a given witch point to infinity.
Arguments
---------
min_val: float
max_val value the function will return when x=switch_point.
min_val: float
The value the function will converge to when x -> infinity.
switch_point: float
The point in which the function's value will become min_val. Smaller
x values will return values smaller than min_val.
smooth_factor: float
The bigger the smoother, and less cliff-like, is the function.
Returns
-------
function
The desired smooth function.
"""
dif = max_val - min_val
def _smooth_step(x):
return min_val + dif * tanh((x - switch_point) / smooth_factor)
return _smooth_step
|
[
"def",
"get_smooth_step_function",
"(",
"min_val",
",",
"max_val",
",",
"switch_point",
",",
"smooth_factor",
")",
":",
"dif",
"=",
"max_val",
"-",
"min_val",
"def",
"_smooth_step",
"(",
"x",
")",
":",
"return",
"min_val",
"+",
"dif",
"*",
"tanh",
"(",
"(",
"x",
"-",
"switch_point",
")",
"/",
"smooth_factor",
")",
"return",
"_smooth_step"
] |
Returns a function that moves smoothly between a minimal value and a
maximal one when its value increases from a given witch point to infinity.
Arguments
---------
min_val: float
max_val value the function will return when x=switch_point.
min_val: float
The value the function will converge to when x -> infinity.
switch_point: float
The point in which the function's value will become min_val. Smaller
x values will return values smaller than min_val.
smooth_factor: float
The bigger the smoother, and less cliff-like, is the function.
Returns
-------
function
The desired smooth function.
|
[
"Returns",
"a",
"function",
"that",
"moves",
"smoothly",
"between",
"a",
"minimal",
"value",
"and",
"a",
"maximal",
"one",
"when",
"its",
"value",
"increases",
"from",
"a",
"given",
"witch",
"point",
"to",
"infinity",
"."
] |
1333e3b96242a5bad9d3e699ffd58a1597fdc89f
|
https://github.com/shaypal5/comath/blob/1333e3b96242a5bad9d3e699ffd58a1597fdc89f/comath/func/func.py#L10-L34
|
242,443
|
jalanb/pysyte
|
pysyte/debuggers.py
|
stack_sources
|
def stack_sources():
"""A list of sources for frames above this"""
# lazy imports
import linecache
result = []
for frame_info in reversed(inspect.stack()):
_frame, filename, line_number, _function, _context, _index = frame_info
linecache.lazycache(filename, {})
_line = linecache.getline(filename, line_number).rstrip()
# Each record contains a frame object, filename, line number, function
# name, a list of lines of context, and index within the context
_sources = [(path, line) for _, path, line, _, _, _ in inspect.stack()]
return result
|
python
|
def stack_sources():
"""A list of sources for frames above this"""
# lazy imports
import linecache
result = []
for frame_info in reversed(inspect.stack()):
_frame, filename, line_number, _function, _context, _index = frame_info
linecache.lazycache(filename, {})
_line = linecache.getline(filename, line_number).rstrip()
# Each record contains a frame object, filename, line number, function
# name, a list of lines of context, and index within the context
_sources = [(path, line) for _, path, line, _, _, _ in inspect.stack()]
return result
|
[
"def",
"stack_sources",
"(",
")",
":",
"# lazy imports",
"import",
"linecache",
"result",
"=",
"[",
"]",
"for",
"frame_info",
"in",
"reversed",
"(",
"inspect",
".",
"stack",
"(",
")",
")",
":",
"_frame",
",",
"filename",
",",
"line_number",
",",
"_function",
",",
"_context",
",",
"_index",
"=",
"frame_info",
"linecache",
".",
"lazycache",
"(",
"filename",
",",
"{",
"}",
")",
"_line",
"=",
"linecache",
".",
"getline",
"(",
"filename",
",",
"line_number",
")",
".",
"rstrip",
"(",
")",
"# Each record contains a frame object, filename, line number, function",
"# name, a list of lines of context, and index within the context",
"_sources",
"=",
"[",
"(",
"path",
",",
"line",
")",
"for",
"_",
",",
"path",
",",
"line",
",",
"_",
",",
"_",
",",
"_",
"in",
"inspect",
".",
"stack",
"(",
")",
"]",
"return",
"result"
] |
A list of sources for frames above this
|
[
"A",
"list",
"of",
"sources",
"for",
"frames",
"above",
"this"
] |
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/debuggers.py#L151-L164
|
242,444
|
KyleWpppd/css-audit
|
cssaudit/parser.py
|
extract_leftmost_selector
|
def extract_leftmost_selector(selector_list):
"""
Because we aren't building a DOM tree to transverse, the only way
to get the most general selectors is to take the leftmost.
For example with `div.outer div.inner`, we can't tell if `div.inner`
has been used in context without building a tree.
"""
classes = set()
ids = set()
elements = set()
# print "Selector list: %s \n\n\n\n\n\n" % selector_list
for selector in selector_list:
selector = selector.split()[0]
if selector[0] == '.':
classes.add(selector)
elif selector[0] == '#':
ids.add(selector)
else:
elements.add(selector)
return {
'classes':classes,
'ids':ids,
'elements':elements,
}
|
python
|
def extract_leftmost_selector(selector_list):
"""
Because we aren't building a DOM tree to transverse, the only way
to get the most general selectors is to take the leftmost.
For example with `div.outer div.inner`, we can't tell if `div.inner`
has been used in context without building a tree.
"""
classes = set()
ids = set()
elements = set()
# print "Selector list: %s \n\n\n\n\n\n" % selector_list
for selector in selector_list:
selector = selector.split()[0]
if selector[0] == '.':
classes.add(selector)
elif selector[0] == '#':
ids.add(selector)
else:
elements.add(selector)
return {
'classes':classes,
'ids':ids,
'elements':elements,
}
|
[
"def",
"extract_leftmost_selector",
"(",
"selector_list",
")",
":",
"classes",
"=",
"set",
"(",
")",
"ids",
"=",
"set",
"(",
")",
"elements",
"=",
"set",
"(",
")",
"# print \"Selector list: %s \\n\\n\\n\\n\\n\\n\" % selector_list",
"for",
"selector",
"in",
"selector_list",
":",
"selector",
"=",
"selector",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"selector",
"[",
"0",
"]",
"==",
"'.'",
":",
"classes",
".",
"add",
"(",
"selector",
")",
"elif",
"selector",
"[",
"0",
"]",
"==",
"'#'",
":",
"ids",
".",
"add",
"(",
"selector",
")",
"else",
":",
"elements",
".",
"add",
"(",
"selector",
")",
"return",
"{",
"'classes'",
":",
"classes",
",",
"'ids'",
":",
"ids",
",",
"'elements'",
":",
"elements",
",",
"}"
] |
Because we aren't building a DOM tree to transverse, the only way
to get the most general selectors is to take the leftmost.
For example with `div.outer div.inner`, we can't tell if `div.inner`
has been used in context without building a tree.
|
[
"Because",
"we",
"aren",
"t",
"building",
"a",
"DOM",
"tree",
"to",
"transverse",
"the",
"only",
"way",
"to",
"get",
"the",
"most",
"general",
"selectors",
"is",
"to",
"take",
"the",
"leftmost",
".",
"For",
"example",
"with",
"div",
".",
"outer",
"div",
".",
"inner",
"we",
"can",
"t",
"tell",
"if",
"div",
".",
"inner",
"has",
"been",
"used",
"in",
"context",
"without",
"building",
"a",
"tree",
"."
] |
cab4d4204cf30d54bc1881deee6ad92ae6aacc56
|
https://github.com/KyleWpppd/css-audit/blob/cab4d4204cf30d54bc1881deee6ad92ae6aacc56/cssaudit/parser.py#L213-L236
|
242,445
|
KyleWpppd/css-audit
|
cssaudit/parser.py
|
Cssparser.append_styles
|
def append_styles(self, tag, attrs):
"""
Append classes found in HTML elements to the list of styles used.
Because we haven't built the tree, we aren't using the `tag` parameter
for now.
@param <string> tag
The HTML tag we're parsing
@param <tuple> attrs
A tuple of HTML element attributes such as 'class', 'id',
'style', etc. The tuple is of the form ('html_attribute',
'attr1', 'attr2', 'attr3' ... 'attrN')
"""
dattrs = dict(attrs)
if 'class' in dattrs:
#print "Found classes '%s'" % dattrs['class']
class_names = dattrs['class'].split()
dotted_names = map(prepend_dot,class_names)
dotted_names.sort()
self.used_classes.extend(' '.join(dotted_names))
self.unchained_classes.extend(dotted_names)
if 'id' in dattrs:
#print "Found id '%s'" % dattrs['id']
self.used_ids.extend(prepend_hash(dattrs['id'].strip()))
|
python
|
def append_styles(self, tag, attrs):
"""
Append classes found in HTML elements to the list of styles used.
Because we haven't built the tree, we aren't using the `tag` parameter
for now.
@param <string> tag
The HTML tag we're parsing
@param <tuple> attrs
A tuple of HTML element attributes such as 'class', 'id',
'style', etc. The tuple is of the form ('html_attribute',
'attr1', 'attr2', 'attr3' ... 'attrN')
"""
dattrs = dict(attrs)
if 'class' in dattrs:
#print "Found classes '%s'" % dattrs['class']
class_names = dattrs['class'].split()
dotted_names = map(prepend_dot,class_names)
dotted_names.sort()
self.used_classes.extend(' '.join(dotted_names))
self.unchained_classes.extend(dotted_names)
if 'id' in dattrs:
#print "Found id '%s'" % dattrs['id']
self.used_ids.extend(prepend_hash(dattrs['id'].strip()))
|
[
"def",
"append_styles",
"(",
"self",
",",
"tag",
",",
"attrs",
")",
":",
"dattrs",
"=",
"dict",
"(",
"attrs",
")",
"if",
"'class'",
"in",
"dattrs",
":",
"#print \"Found classes '%s'\" % dattrs['class']",
"class_names",
"=",
"dattrs",
"[",
"'class'",
"]",
".",
"split",
"(",
")",
"dotted_names",
"=",
"map",
"(",
"prepend_dot",
",",
"class_names",
")",
"dotted_names",
".",
"sort",
"(",
")",
"self",
".",
"used_classes",
".",
"extend",
"(",
"' '",
".",
"join",
"(",
"dotted_names",
")",
")",
"self",
".",
"unchained_classes",
".",
"extend",
"(",
"dotted_names",
")",
"if",
"'id'",
"in",
"dattrs",
":",
"#print \"Found id '%s'\" % dattrs['id']",
"self",
".",
"used_ids",
".",
"extend",
"(",
"prepend_hash",
"(",
"dattrs",
"[",
"'id'",
"]",
".",
"strip",
"(",
")",
")",
")"
] |
Append classes found in HTML elements to the list of styles used.
Because we haven't built the tree, we aren't using the `tag` parameter
for now.
@param <string> tag
The HTML tag we're parsing
@param <tuple> attrs
A tuple of HTML element attributes such as 'class', 'id',
'style', etc. The tuple is of the form ('html_attribute',
'attr1', 'attr2', 'attr3' ... 'attrN')
|
[
"Append",
"classes",
"found",
"in",
"HTML",
"elements",
"to",
"the",
"list",
"of",
"styles",
"used",
".",
"Because",
"we",
"haven",
"t",
"built",
"the",
"tree",
"we",
"aren",
"t",
"using",
"the",
"tag",
"parameter",
"for",
"now",
"."
] |
cab4d4204cf30d54bc1881deee6ad92ae6aacc56
|
https://github.com/KyleWpppd/css-audit/blob/cab4d4204cf30d54bc1881deee6ad92ae6aacc56/cssaudit/parser.py#L129-L151
|
242,446
|
marcosgabarda/django-belt
|
belt/decorators.py
|
delete_after
|
def delete_after(filename):
"""Decorator to be sure the file given by parameter is deleted after the
execution of the method.
"""
def delete_after_decorator(function):
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
finally:
if os.path.isfile(filename):
os.remove(filename)
if os.path.isdir(filename):
shutil.rmtree(filename)
return wrapper
return delete_after_decorator
|
python
|
def delete_after(filename):
"""Decorator to be sure the file given by parameter is deleted after the
execution of the method.
"""
def delete_after_decorator(function):
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
finally:
if os.path.isfile(filename):
os.remove(filename)
if os.path.isdir(filename):
shutil.rmtree(filename)
return wrapper
return delete_after_decorator
|
[
"def",
"delete_after",
"(",
"filename",
")",
":",
"def",
"delete_after_decorator",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
":",
"shutil",
".",
"rmtree",
"(",
"filename",
")",
"return",
"wrapper",
"return",
"delete_after_decorator"
] |
Decorator to be sure the file given by parameter is deleted after the
execution of the method.
|
[
"Decorator",
"to",
"be",
"sure",
"the",
"file",
"given",
"by",
"parameter",
"is",
"deleted",
"after",
"the",
"execution",
"of",
"the",
"method",
"."
] |
81404604c4dff664b1520b01e1f638c9c6bab41b
|
https://github.com/marcosgabarda/django-belt/blob/81404604c4dff664b1520b01e1f638c9c6bab41b/belt/decorators.py#L8-L22
|
242,447
|
tBaxter/tango-shared-core
|
build/lib/tango_shared/utils/maptools.py
|
get_geocode
|
def get_geocode(city, state, street_address="", zipcode=""):
"""
For given location or object, takes address data and returns
latitude and longitude coordinates from Google geocoding service
get_geocode(self, street_address="1709 Grand Ave.", state="MO", zip="64112")
Returns a tuple of (lat, long)
Most times you'll want to join the return.
"""
try:
key = settings.GMAP_KEY
except AttributeError:
return "You need to put GMAP_KEY in settings"
# build valid location string
location = ""
if street_address:
location += '{}+'.format(street_address.replace(" ", "+"))
location += '{}+{}'.format(city.replace(" ", "+"), state)
if zipcode:
location += "+{}".format(zipcode)
url = "http://maps.google.com/maps/geo?q={}&output=xml&key={}".format(location, key)
file = urllib.urlopen(url).read()
try:
xml = xmltramp.parse(file)
except Exception as error:
print("Failed to parse xml file {}: {}".format(file, error))
return None
status = str(xml.Response.Status.code)
if status == "200":
geocode = str(xml.Response.Placemark.Point.coordinates).split(',')
# Flip geocode because geocoder returns long/lat while Maps wants lat/long.
# Yes, it's dumb.
return (geocode[1], geocode[0])
else:
print(status)
return None
|
python
|
def get_geocode(city, state, street_address="", zipcode=""):
"""
For given location or object, takes address data and returns
latitude and longitude coordinates from Google geocoding service
get_geocode(self, street_address="1709 Grand Ave.", state="MO", zip="64112")
Returns a tuple of (lat, long)
Most times you'll want to join the return.
"""
try:
key = settings.GMAP_KEY
except AttributeError:
return "You need to put GMAP_KEY in settings"
# build valid location string
location = ""
if street_address:
location += '{}+'.format(street_address.replace(" ", "+"))
location += '{}+{}'.format(city.replace(" ", "+"), state)
if zipcode:
location += "+{}".format(zipcode)
url = "http://maps.google.com/maps/geo?q={}&output=xml&key={}".format(location, key)
file = urllib.urlopen(url).read()
try:
xml = xmltramp.parse(file)
except Exception as error:
print("Failed to parse xml file {}: {}".format(file, error))
return None
status = str(xml.Response.Status.code)
if status == "200":
geocode = str(xml.Response.Placemark.Point.coordinates).split(',')
# Flip geocode because geocoder returns long/lat while Maps wants lat/long.
# Yes, it's dumb.
return (geocode[1], geocode[0])
else:
print(status)
return None
|
[
"def",
"get_geocode",
"(",
"city",
",",
"state",
",",
"street_address",
"=",
"\"\"",
",",
"zipcode",
"=",
"\"\"",
")",
":",
"try",
":",
"key",
"=",
"settings",
".",
"GMAP_KEY",
"except",
"AttributeError",
":",
"return",
"\"You need to put GMAP_KEY in settings\"",
"# build valid location string",
"location",
"=",
"\"\"",
"if",
"street_address",
":",
"location",
"+=",
"'{}+'",
".",
"format",
"(",
"street_address",
".",
"replace",
"(",
"\" \"",
",",
"\"+\"",
")",
")",
"location",
"+=",
"'{}+{}'",
".",
"format",
"(",
"city",
".",
"replace",
"(",
"\" \"",
",",
"\"+\"",
")",
",",
"state",
")",
"if",
"zipcode",
":",
"location",
"+=",
"\"+{}\"",
".",
"format",
"(",
"zipcode",
")",
"url",
"=",
"\"http://maps.google.com/maps/geo?q={}&output=xml&key={}\"",
".",
"format",
"(",
"location",
",",
"key",
")",
"file",
"=",
"urllib",
".",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
"try",
":",
"xml",
"=",
"xmltramp",
".",
"parse",
"(",
"file",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"\"Failed to parse xml file {}: {}\"",
".",
"format",
"(",
"file",
",",
"error",
")",
")",
"return",
"None",
"status",
"=",
"str",
"(",
"xml",
".",
"Response",
".",
"Status",
".",
"code",
")",
"if",
"status",
"==",
"\"200\"",
":",
"geocode",
"=",
"str",
"(",
"xml",
".",
"Response",
".",
"Placemark",
".",
"Point",
".",
"coordinates",
")",
".",
"split",
"(",
"','",
")",
"# Flip geocode because geocoder returns long/lat while Maps wants lat/long.",
"# Yes, it's dumb.",
"return",
"(",
"geocode",
"[",
"1",
"]",
",",
"geocode",
"[",
"0",
"]",
")",
"else",
":",
"print",
"(",
"status",
")",
"return",
"None"
] |
For given location or object, takes address data and returns
latitude and longitude coordinates from Google geocoding service
get_geocode(self, street_address="1709 Grand Ave.", state="MO", zip="64112")
Returns a tuple of (lat, long)
Most times you'll want to join the return.
|
[
"For",
"given",
"location",
"or",
"object",
"takes",
"address",
"data",
"and",
"returns",
"latitude",
"and",
"longitude",
"coordinates",
"from",
"Google",
"geocoding",
"service"
] |
35fc10aef1ceedcdb4d6d866d44a22efff718812
|
https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/utils/maptools.py#L10-L50
|
242,448
|
dicaso/leopard
|
leopard/__init__.py
|
Section.list
|
def list(self,walkTrace=tuple(),case=None,element=None):
"""List section titles.
"""
if case == 'sectionmain': print(walkTrace,self.title)
|
python
|
def list(self,walkTrace=tuple(),case=None,element=None):
"""List section titles.
"""
if case == 'sectionmain': print(walkTrace,self.title)
|
[
"def",
"list",
"(",
"self",
",",
"walkTrace",
"=",
"tuple",
"(",
")",
",",
"case",
"=",
"None",
",",
"element",
"=",
"None",
")",
":",
"if",
"case",
"==",
"'sectionmain'",
":",
"print",
"(",
"walkTrace",
",",
"self",
".",
"title",
")"
] |
List section titles.
|
[
"List",
"section",
"titles",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L129-L132
|
242,449
|
dicaso/leopard
|
leopard/__init__.py
|
Section.listFigures
|
def listFigures(self,walkTrace=tuple(),case=None,element=None):
"""List section figures.
"""
if case == 'sectionmain': print(walkTrace,self.title)
if case == 'figure':
caption,fig = element
try:
print(walkTrace,fig._leopardref,caption)
except AttributeError:
fig._leopardref = next(self._reportSection._fignr)
print(walkTrace,fig._leopardref,caption)
|
python
|
def listFigures(self,walkTrace=tuple(),case=None,element=None):
"""List section figures.
"""
if case == 'sectionmain': print(walkTrace,self.title)
if case == 'figure':
caption,fig = element
try:
print(walkTrace,fig._leopardref,caption)
except AttributeError:
fig._leopardref = next(self._reportSection._fignr)
print(walkTrace,fig._leopardref,caption)
|
[
"def",
"listFigures",
"(",
"self",
",",
"walkTrace",
"=",
"tuple",
"(",
")",
",",
"case",
"=",
"None",
",",
"element",
"=",
"None",
")",
":",
"if",
"case",
"==",
"'sectionmain'",
":",
"print",
"(",
"walkTrace",
",",
"self",
".",
"title",
")",
"if",
"case",
"==",
"'figure'",
":",
"caption",
",",
"fig",
"=",
"element",
"try",
":",
"print",
"(",
"walkTrace",
",",
"fig",
".",
"_leopardref",
",",
"caption",
")",
"except",
"AttributeError",
":",
"fig",
".",
"_leopardref",
"=",
"next",
"(",
"self",
".",
"_reportSection",
".",
"_fignr",
")",
"print",
"(",
"walkTrace",
",",
"fig",
".",
"_leopardref",
",",
"caption",
")"
] |
List section figures.
|
[
"List",
"section",
"figures",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L135-L145
|
242,450
|
dicaso/leopard
|
leopard/__init__.py
|
Section.listTables
|
def listTables(self,walkTrace=tuple(),case=None,element=None):
"""List section tables.
"""
if case == 'sectionmain': print(walkTrace,self.title)
if case == 'table':
caption,tab = element
try:
print(walkTrace,tab._leopardref,caption)
except AttributeError:
tab._leopardref = next(self._reportSection._tabnr)
print(walkTrace,tab._leopardref,caption)
|
python
|
def listTables(self,walkTrace=tuple(),case=None,element=None):
"""List section tables.
"""
if case == 'sectionmain': print(walkTrace,self.title)
if case == 'table':
caption,tab = element
try:
print(walkTrace,tab._leopardref,caption)
except AttributeError:
tab._leopardref = next(self._reportSection._tabnr)
print(walkTrace,tab._leopardref,caption)
|
[
"def",
"listTables",
"(",
"self",
",",
"walkTrace",
"=",
"tuple",
"(",
")",
",",
"case",
"=",
"None",
",",
"element",
"=",
"None",
")",
":",
"if",
"case",
"==",
"'sectionmain'",
":",
"print",
"(",
"walkTrace",
",",
"self",
".",
"title",
")",
"if",
"case",
"==",
"'table'",
":",
"caption",
",",
"tab",
"=",
"element",
"try",
":",
"print",
"(",
"walkTrace",
",",
"tab",
".",
"_leopardref",
",",
"caption",
")",
"except",
"AttributeError",
":",
"tab",
".",
"_leopardref",
"=",
"next",
"(",
"self",
".",
"_reportSection",
".",
"_tabnr",
")",
"print",
"(",
"walkTrace",
",",
"tab",
".",
"_leopardref",
",",
"caption",
")"
] |
List section tables.
|
[
"List",
"section",
"tables",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L148-L158
|
242,451
|
dicaso/leopard
|
leopard/__init__.py
|
Section.sectionOutZip
|
def sectionOutZip(self,zipcontainer,zipdir='',figtype='png'):
"""Prepares section for zip output
"""
from io import StringIO, BytesIO
text = self.p if not self.settings['doubleslashnewline'] else self.p.replace('//','\n')
zipcontainer.writestr(
zipdir+'section.txt',
'# {}\n{}'.format(self.title,text).encode()
)
c = count(1)
for ftitle,f in self.figs.items():
figfile = zipdir+'fig{}_{}.{}'.format(next(c),ftitle.replace(' ','_'),figtype)
b = BytesIO()
f.savefig(b,format=figtype,transparent=True)
b.seek(0)
zipcontainer.writestr(figfile,b.getvalue())
c = count(1)
for ttitle,t in self.tabs.items():
b = StringIO()
t.to_csv(b,sep=csvsep,decimal=csvdec)
b.seek(0)
zipcontainer.writestr(
zipdir+'table{}_{}.csv'.format(next(c),ttitle.replace(' ','_')),
b.read().encode()
)
c = count(1)
for s in self.subs:
s.sectionOutZip(zipcontainer,'{}s{}_{}/'.format(zipdir,next(c),s.title.replace(' ','_')),figtype=figtype)
|
python
|
def sectionOutZip(self,zipcontainer,zipdir='',figtype='png'):
"""Prepares section for zip output
"""
from io import StringIO, BytesIO
text = self.p if not self.settings['doubleslashnewline'] else self.p.replace('//','\n')
zipcontainer.writestr(
zipdir+'section.txt',
'# {}\n{}'.format(self.title,text).encode()
)
c = count(1)
for ftitle,f in self.figs.items():
figfile = zipdir+'fig{}_{}.{}'.format(next(c),ftitle.replace(' ','_'),figtype)
b = BytesIO()
f.savefig(b,format=figtype,transparent=True)
b.seek(0)
zipcontainer.writestr(figfile,b.getvalue())
c = count(1)
for ttitle,t in self.tabs.items():
b = StringIO()
t.to_csv(b,sep=csvsep,decimal=csvdec)
b.seek(0)
zipcontainer.writestr(
zipdir+'table{}_{}.csv'.format(next(c),ttitle.replace(' ','_')),
b.read().encode()
)
c = count(1)
for s in self.subs:
s.sectionOutZip(zipcontainer,'{}s{}_{}/'.format(zipdir,next(c),s.title.replace(' ','_')),figtype=figtype)
|
[
"def",
"sectionOutZip",
"(",
"self",
",",
"zipcontainer",
",",
"zipdir",
"=",
"''",
",",
"figtype",
"=",
"'png'",
")",
":",
"from",
"io",
"import",
"StringIO",
",",
"BytesIO",
"text",
"=",
"self",
".",
"p",
"if",
"not",
"self",
".",
"settings",
"[",
"'doubleslashnewline'",
"]",
"else",
"self",
".",
"p",
".",
"replace",
"(",
"'//'",
",",
"'\\n'",
")",
"zipcontainer",
".",
"writestr",
"(",
"zipdir",
"+",
"'section.txt'",
",",
"'# {}\\n{}'",
".",
"format",
"(",
"self",
".",
"title",
",",
"text",
")",
".",
"encode",
"(",
")",
")",
"c",
"=",
"count",
"(",
"1",
")",
"for",
"ftitle",
",",
"f",
"in",
"self",
".",
"figs",
".",
"items",
"(",
")",
":",
"figfile",
"=",
"zipdir",
"+",
"'fig{}_{}.{}'",
".",
"format",
"(",
"next",
"(",
"c",
")",
",",
"ftitle",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
",",
"figtype",
")",
"b",
"=",
"BytesIO",
"(",
")",
"f",
".",
"savefig",
"(",
"b",
",",
"format",
"=",
"figtype",
",",
"transparent",
"=",
"True",
")",
"b",
".",
"seek",
"(",
"0",
")",
"zipcontainer",
".",
"writestr",
"(",
"figfile",
",",
"b",
".",
"getvalue",
"(",
")",
")",
"c",
"=",
"count",
"(",
"1",
")",
"for",
"ttitle",
",",
"t",
"in",
"self",
".",
"tabs",
".",
"items",
"(",
")",
":",
"b",
"=",
"StringIO",
"(",
")",
"t",
".",
"to_csv",
"(",
"b",
",",
"sep",
"=",
"csvsep",
",",
"decimal",
"=",
"csvdec",
")",
"b",
".",
"seek",
"(",
"0",
")",
"zipcontainer",
".",
"writestr",
"(",
"zipdir",
"+",
"'table{}_{}.csv'",
".",
"format",
"(",
"next",
"(",
"c",
")",
",",
"ttitle",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
",",
"b",
".",
"read",
"(",
")",
".",
"encode",
"(",
")",
")",
"c",
"=",
"count",
"(",
"1",
")",
"for",
"s",
"in",
"self",
".",
"subs",
":",
"s",
".",
"sectionOutZip",
"(",
"zipcontainer",
",",
"'{}s{}_{}/'",
".",
"format",
"(",
"zipdir",
",",
"next",
"(",
"c",
")",
",",
"s",
".",
"title",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
",",
"figtype",
"=",
"figtype",
")"
] |
Prepares section for zip output
|
[
"Prepares",
"section",
"for",
"zip",
"output"
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L160-L187
|
242,452
|
dicaso/leopard
|
leopard/__init__.py
|
Section.sectionsPDF
|
def sectionsPDF(self,walkTrace=tuple(),case=None,element=None,doc=None):
"""Prepares section for PDF output.
"""
import pylatex as pl
if case == 'sectionmain':
if self.settings['clearpage']: doc.append(pl.utils.NoEscape(r'\clearpage'))
with doc.create(pl.Section(self.title) if len(walkTrace) == 1 else
pl.Subsection(self.title) if len(walkTrace) == 2 else
pl.Subsubsection(self.title)):
text = (self.p.replace('\n',' ').replace('//','\n')
if self.settings['doubleslashnewline'] else
renewliner(self.p))
if r'\ref' not in text: doc.append(text)
else:
figrefs = re.compile(r'\\ref\{figref\d+\}')
#latexcode = re.compile(r'&@\\.+')
lastpos = 0
for fr in figrefs.finditer(text):
doc.append(text[lastpos:fr.start()])
doc.append(pl.utils.NoEscape(text[fr.start():fr.end()]))
lastpos = fr.end()
doc.append(text[lastpos:])
if case == 'figure':
width = r'1\textwidth'
figtitle,fig = element
#if fig._suptitle: fig.suptitle('Figure {}: {}'.format(fig.number,fig._suptitle.get_text()))
#figtitle = fig._suptitle.get_text() if fig._suptitle else ''
#fig.suptitle('')
with doc.create(pl.Figure(position='htbp')) as plot:
plt.figure(fig.number)
plot.add_plot(width=pl.NoEscape(width))
plot.add_caption(figtitle)
plot.append(pl.utils.NoEscape(r'\label{figref'+str(fig.number)+r'}'))
#fig.suptitle(figtitle if figtitle else None)
if case == 'table':
caption,t = element
t = pdSeriesToFrame(t) if type(t) == pd.Series else t
if self.settings['tablehead']:
t = t.head(self.settings['tablehead'])
if self.settings['tablecolumns']:
t = t[self.settings['tablecolumns']]
with doc.create(pl.Table(position='ht')) as tablenv:
tablenv.add_caption(caption)
with doc.create(pl.Tabular('r|'+'l'*len(t.columns))) as table:
table.add_hline()
table.add_row(('',)+tuple(t.columns))
for row in t.to_records():
table.add_row(row)
table.add_hline(1)
|
python
|
def sectionsPDF(self,walkTrace=tuple(),case=None,element=None,doc=None):
"""Prepares section for PDF output.
"""
import pylatex as pl
if case == 'sectionmain':
if self.settings['clearpage']: doc.append(pl.utils.NoEscape(r'\clearpage'))
with doc.create(pl.Section(self.title) if len(walkTrace) == 1 else
pl.Subsection(self.title) if len(walkTrace) == 2 else
pl.Subsubsection(self.title)):
text = (self.p.replace('\n',' ').replace('//','\n')
if self.settings['doubleslashnewline'] else
renewliner(self.p))
if r'\ref' not in text: doc.append(text)
else:
figrefs = re.compile(r'\\ref\{figref\d+\}')
#latexcode = re.compile(r'&@\\.+')
lastpos = 0
for fr in figrefs.finditer(text):
doc.append(text[lastpos:fr.start()])
doc.append(pl.utils.NoEscape(text[fr.start():fr.end()]))
lastpos = fr.end()
doc.append(text[lastpos:])
if case == 'figure':
width = r'1\textwidth'
figtitle,fig = element
#if fig._suptitle: fig.suptitle('Figure {}: {}'.format(fig.number,fig._suptitle.get_text()))
#figtitle = fig._suptitle.get_text() if fig._suptitle else ''
#fig.suptitle('')
with doc.create(pl.Figure(position='htbp')) as plot:
plt.figure(fig.number)
plot.add_plot(width=pl.NoEscape(width))
plot.add_caption(figtitle)
plot.append(pl.utils.NoEscape(r'\label{figref'+str(fig.number)+r'}'))
#fig.suptitle(figtitle if figtitle else None)
if case == 'table':
caption,t = element
t = pdSeriesToFrame(t) if type(t) == pd.Series else t
if self.settings['tablehead']:
t = t.head(self.settings['tablehead'])
if self.settings['tablecolumns']:
t = t[self.settings['tablecolumns']]
with doc.create(pl.Table(position='ht')) as tablenv:
tablenv.add_caption(caption)
with doc.create(pl.Tabular('r|'+'l'*len(t.columns))) as table:
table.add_hline()
table.add_row(('',)+tuple(t.columns))
for row in t.to_records():
table.add_row(row)
table.add_hline(1)
|
[
"def",
"sectionsPDF",
"(",
"self",
",",
"walkTrace",
"=",
"tuple",
"(",
")",
",",
"case",
"=",
"None",
",",
"element",
"=",
"None",
",",
"doc",
"=",
"None",
")",
":",
"import",
"pylatex",
"as",
"pl",
"if",
"case",
"==",
"'sectionmain'",
":",
"if",
"self",
".",
"settings",
"[",
"'clearpage'",
"]",
":",
"doc",
".",
"append",
"(",
"pl",
".",
"utils",
".",
"NoEscape",
"(",
"r'\\clearpage'",
")",
")",
"with",
"doc",
".",
"create",
"(",
"pl",
".",
"Section",
"(",
"self",
".",
"title",
")",
"if",
"len",
"(",
"walkTrace",
")",
"==",
"1",
"else",
"pl",
".",
"Subsection",
"(",
"self",
".",
"title",
")",
"if",
"len",
"(",
"walkTrace",
")",
"==",
"2",
"else",
"pl",
".",
"Subsubsection",
"(",
"self",
".",
"title",
")",
")",
":",
"text",
"=",
"(",
"self",
".",
"p",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
".",
"replace",
"(",
"'//'",
",",
"'\\n'",
")",
"if",
"self",
".",
"settings",
"[",
"'doubleslashnewline'",
"]",
"else",
"renewliner",
"(",
"self",
".",
"p",
")",
")",
"if",
"r'\\ref'",
"not",
"in",
"text",
":",
"doc",
".",
"append",
"(",
"text",
")",
"else",
":",
"figrefs",
"=",
"re",
".",
"compile",
"(",
"r'\\\\ref\\{figref\\d+\\}'",
")",
"#latexcode = re.compile(r'&@\\\\.+')",
"lastpos",
"=",
"0",
"for",
"fr",
"in",
"figrefs",
".",
"finditer",
"(",
"text",
")",
":",
"doc",
".",
"append",
"(",
"text",
"[",
"lastpos",
":",
"fr",
".",
"start",
"(",
")",
"]",
")",
"doc",
".",
"append",
"(",
"pl",
".",
"utils",
".",
"NoEscape",
"(",
"text",
"[",
"fr",
".",
"start",
"(",
")",
":",
"fr",
".",
"end",
"(",
")",
"]",
")",
")",
"lastpos",
"=",
"fr",
".",
"end",
"(",
")",
"doc",
".",
"append",
"(",
"text",
"[",
"lastpos",
":",
"]",
")",
"if",
"case",
"==",
"'figure'",
":",
"width",
"=",
"r'1\\textwidth'",
"figtitle",
",",
"fig",
"=",
"element",
"#if fig._suptitle: fig.suptitle('Figure {}: {}'.format(fig.number,fig._suptitle.get_text()))",
"#figtitle = fig._suptitle.get_text() if fig._suptitle else ''",
"#fig.suptitle('')",
"with",
"doc",
".",
"create",
"(",
"pl",
".",
"Figure",
"(",
"position",
"=",
"'htbp'",
")",
")",
"as",
"plot",
":",
"plt",
".",
"figure",
"(",
"fig",
".",
"number",
")",
"plot",
".",
"add_plot",
"(",
"width",
"=",
"pl",
".",
"NoEscape",
"(",
"width",
")",
")",
"plot",
".",
"add_caption",
"(",
"figtitle",
")",
"plot",
".",
"append",
"(",
"pl",
".",
"utils",
".",
"NoEscape",
"(",
"r'\\label{figref'",
"+",
"str",
"(",
"fig",
".",
"number",
")",
"+",
"r'}'",
")",
")",
"#fig.suptitle(figtitle if figtitle else None)",
"if",
"case",
"==",
"'table'",
":",
"caption",
",",
"t",
"=",
"element",
"t",
"=",
"pdSeriesToFrame",
"(",
"t",
")",
"if",
"type",
"(",
"t",
")",
"==",
"pd",
".",
"Series",
"else",
"t",
"if",
"self",
".",
"settings",
"[",
"'tablehead'",
"]",
":",
"t",
"=",
"t",
".",
"head",
"(",
"self",
".",
"settings",
"[",
"'tablehead'",
"]",
")",
"if",
"self",
".",
"settings",
"[",
"'tablecolumns'",
"]",
":",
"t",
"=",
"t",
"[",
"self",
".",
"settings",
"[",
"'tablecolumns'",
"]",
"]",
"with",
"doc",
".",
"create",
"(",
"pl",
".",
"Table",
"(",
"position",
"=",
"'ht'",
")",
")",
"as",
"tablenv",
":",
"tablenv",
".",
"add_caption",
"(",
"caption",
")",
"with",
"doc",
".",
"create",
"(",
"pl",
".",
"Tabular",
"(",
"'r|'",
"+",
"'l'",
"*",
"len",
"(",
"t",
".",
"columns",
")",
")",
")",
"as",
"table",
":",
"table",
".",
"add_hline",
"(",
")",
"table",
".",
"add_row",
"(",
"(",
"''",
",",
")",
"+",
"tuple",
"(",
"t",
".",
"columns",
")",
")",
"for",
"row",
"in",
"t",
".",
"to_records",
"(",
")",
":",
"table",
".",
"add_row",
"(",
"row",
")",
"table",
".",
"add_hline",
"(",
"1",
")"
] |
Prepares section for PDF output.
|
[
"Prepares",
"section",
"for",
"PDF",
"output",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L190-L240
|
242,453
|
dicaso/leopard
|
leopard/__init__.py
|
Section.sectionsWord
|
def sectionsWord(self,walkTrace=tuple(),case=None,element=None,doc=None):
"""Prepares section for word output.
"""
from docx.shared import Inches
from io import BytesIO
#p.add_run('italic.').italic = True
if case == 'sectionmain':
if self.settings['clearpage']: doc.add_page_break()
doc.add_heading(self.title, level = len(walkTrace))
for p in renewliner(self.p).split('\n'):
doc.add_paragraph(p)
if case == 'figure':
bf=BytesIO()
figtitle,fig = element
width = fig.get_size_inches()[0]
width = Inches(width if width < 6 else 6)
fig.savefig(bf)
doc.add_picture(bf, width=width)
doc.add_heading('Figure {}: {}'.format(
fig._leopardref,
figtitle),level=6)
if case == 'table':
caption,t = element
tableref = t._leopardref
t = pdSeriesToFrame(t) if type(t) == pd.Series else t
if self.settings['tablehead']:
t = t.head(self.settings['tablehead'])
if self.settings['tablecolumns']:
t = t[self.settings['tablecolumns']]
doc.add_heading('Table {}: {}'.format(
tableref,
caption),level=6)
table = doc.add_table(t.shape[0]+1,t.shape[1]+1)
for tcell,col in zip(table.rows[0].cells[1:],t.columns):
tcell.text = str(col)
for trow,rrow in zip(table.rows[1:],t.to_records()):
for tcell,rcell in zip(trow.cells,rrow):
tcell.text = str(rcell)
|
python
|
def sectionsWord(self,walkTrace=tuple(),case=None,element=None,doc=None):
"""Prepares section for word output.
"""
from docx.shared import Inches
from io import BytesIO
#p.add_run('italic.').italic = True
if case == 'sectionmain':
if self.settings['clearpage']: doc.add_page_break()
doc.add_heading(self.title, level = len(walkTrace))
for p in renewliner(self.p).split('\n'):
doc.add_paragraph(p)
if case == 'figure':
bf=BytesIO()
figtitle,fig = element
width = fig.get_size_inches()[0]
width = Inches(width if width < 6 else 6)
fig.savefig(bf)
doc.add_picture(bf, width=width)
doc.add_heading('Figure {}: {}'.format(
fig._leopardref,
figtitle),level=6)
if case == 'table':
caption,t = element
tableref = t._leopardref
t = pdSeriesToFrame(t) if type(t) == pd.Series else t
if self.settings['tablehead']:
t = t.head(self.settings['tablehead'])
if self.settings['tablecolumns']:
t = t[self.settings['tablecolumns']]
doc.add_heading('Table {}: {}'.format(
tableref,
caption),level=6)
table = doc.add_table(t.shape[0]+1,t.shape[1]+1)
for tcell,col in zip(table.rows[0].cells[1:],t.columns):
tcell.text = str(col)
for trow,rrow in zip(table.rows[1:],t.to_records()):
for tcell,rcell in zip(trow.cells,rrow):
tcell.text = str(rcell)
|
[
"def",
"sectionsWord",
"(",
"self",
",",
"walkTrace",
"=",
"tuple",
"(",
")",
",",
"case",
"=",
"None",
",",
"element",
"=",
"None",
",",
"doc",
"=",
"None",
")",
":",
"from",
"docx",
".",
"shared",
"import",
"Inches",
"from",
"io",
"import",
"BytesIO",
"#p.add_run('italic.').italic = True",
"if",
"case",
"==",
"'sectionmain'",
":",
"if",
"self",
".",
"settings",
"[",
"'clearpage'",
"]",
":",
"doc",
".",
"add_page_break",
"(",
")",
"doc",
".",
"add_heading",
"(",
"self",
".",
"title",
",",
"level",
"=",
"len",
"(",
"walkTrace",
")",
")",
"for",
"p",
"in",
"renewliner",
"(",
"self",
".",
"p",
")",
".",
"split",
"(",
"'\\n'",
")",
":",
"doc",
".",
"add_paragraph",
"(",
"p",
")",
"if",
"case",
"==",
"'figure'",
":",
"bf",
"=",
"BytesIO",
"(",
")",
"figtitle",
",",
"fig",
"=",
"element",
"width",
"=",
"fig",
".",
"get_size_inches",
"(",
")",
"[",
"0",
"]",
"width",
"=",
"Inches",
"(",
"width",
"if",
"width",
"<",
"6",
"else",
"6",
")",
"fig",
".",
"savefig",
"(",
"bf",
")",
"doc",
".",
"add_picture",
"(",
"bf",
",",
"width",
"=",
"width",
")",
"doc",
".",
"add_heading",
"(",
"'Figure {}: {}'",
".",
"format",
"(",
"fig",
".",
"_leopardref",
",",
"figtitle",
")",
",",
"level",
"=",
"6",
")",
"if",
"case",
"==",
"'table'",
":",
"caption",
",",
"t",
"=",
"element",
"tableref",
"=",
"t",
".",
"_leopardref",
"t",
"=",
"pdSeriesToFrame",
"(",
"t",
")",
"if",
"type",
"(",
"t",
")",
"==",
"pd",
".",
"Series",
"else",
"t",
"if",
"self",
".",
"settings",
"[",
"'tablehead'",
"]",
":",
"t",
"=",
"t",
".",
"head",
"(",
"self",
".",
"settings",
"[",
"'tablehead'",
"]",
")",
"if",
"self",
".",
"settings",
"[",
"'tablecolumns'",
"]",
":",
"t",
"=",
"t",
"[",
"self",
".",
"settings",
"[",
"'tablecolumns'",
"]",
"]",
"doc",
".",
"add_heading",
"(",
"'Table {}: {}'",
".",
"format",
"(",
"tableref",
",",
"caption",
")",
",",
"level",
"=",
"6",
")",
"table",
"=",
"doc",
".",
"add_table",
"(",
"t",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
",",
"t",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
"for",
"tcell",
",",
"col",
"in",
"zip",
"(",
"table",
".",
"rows",
"[",
"0",
"]",
".",
"cells",
"[",
"1",
":",
"]",
",",
"t",
".",
"columns",
")",
":",
"tcell",
".",
"text",
"=",
"str",
"(",
"col",
")",
"for",
"trow",
",",
"rrow",
"in",
"zip",
"(",
"table",
".",
"rows",
"[",
"1",
":",
"]",
",",
"t",
".",
"to_records",
"(",
")",
")",
":",
"for",
"tcell",
",",
"rcell",
"in",
"zip",
"(",
"trow",
".",
"cells",
",",
"rrow",
")",
":",
"tcell",
".",
"text",
"=",
"str",
"(",
"rcell",
")"
] |
Prepares section for word output.
|
[
"Prepares",
"section",
"for",
"word",
"output",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L244-L286
|
242,454
|
dicaso/leopard
|
leopard/__init__.py
|
Section.sectionFromFunction
|
def sectionFromFunction(function,*args,**kwargs):
"""
This staticmethod executes the function that is passed with the provided args and kwargs.
The first line of the function docstring is used as the section title, the comments
within the function body are parsed and added as the section text.
The function should return an ordered dict of figures and tables, that are then
attached to the section.
Args:
function (function): The function that generates the section content.
Returns:
Section
>>> # Section title of example function
... def exampleFunction(a,b=None):
... 'Mock figures and tables included'
... figures = (('fig1',Mock()),('fig2',Mock()))
... tables = (('tab1',Mock()),('tab2',Mock()))
... return figures, tables
>>> Section.sectionFromFunction(exampleFunction,Mock(),b=Mock())
<Section @ Section title of example function>
"""
figures, tables = function(*args,**kwargs)
title = inspect.getcomments(function)[1:].strip()
text = inspect.getdoc(function)
code = inspect.getsource(function)
return Section(title=title,text=text,figures=figures,tables=tables,code=code)
|
python
|
def sectionFromFunction(function,*args,**kwargs):
"""
This staticmethod executes the function that is passed with the provided args and kwargs.
The first line of the function docstring is used as the section title, the comments
within the function body are parsed and added as the section text.
The function should return an ordered dict of figures and tables, that are then
attached to the section.
Args:
function (function): The function that generates the section content.
Returns:
Section
>>> # Section title of example function
... def exampleFunction(a,b=None):
... 'Mock figures and tables included'
... figures = (('fig1',Mock()),('fig2',Mock()))
... tables = (('tab1',Mock()),('tab2',Mock()))
... return figures, tables
>>> Section.sectionFromFunction(exampleFunction,Mock(),b=Mock())
<Section @ Section title of example function>
"""
figures, tables = function(*args,**kwargs)
title = inspect.getcomments(function)[1:].strip()
text = inspect.getdoc(function)
code = inspect.getsource(function)
return Section(title=title,text=text,figures=figures,tables=tables,code=code)
|
[
"def",
"sectionFromFunction",
"(",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"figures",
",",
"tables",
"=",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"title",
"=",
"inspect",
".",
"getcomments",
"(",
"function",
")",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"text",
"=",
"inspect",
".",
"getdoc",
"(",
"function",
")",
"code",
"=",
"inspect",
".",
"getsource",
"(",
"function",
")",
"return",
"Section",
"(",
"title",
"=",
"title",
",",
"text",
"=",
"text",
",",
"figures",
"=",
"figures",
",",
"tables",
"=",
"tables",
",",
"code",
"=",
"code",
")"
] |
This staticmethod executes the function that is passed with the provided args and kwargs.
The first line of the function docstring is used as the section title, the comments
within the function body are parsed and added as the section text.
The function should return an ordered dict of figures and tables, that are then
attached to the section.
Args:
function (function): The function that generates the section content.
Returns:
Section
>>> # Section title of example function
... def exampleFunction(a,b=None):
... 'Mock figures and tables included'
... figures = (('fig1',Mock()),('fig2',Mock()))
... tables = (('tab1',Mock()),('tab2',Mock()))
... return figures, tables
>>> Section.sectionFromFunction(exampleFunction,Mock(),b=Mock())
<Section @ Section title of example function>
|
[
"This",
"staticmethod",
"executes",
"the",
"function",
"that",
"is",
"passed",
"with",
"the",
"provided",
"args",
"and",
"kwargs",
".",
"The",
"first",
"line",
"of",
"the",
"function",
"docstring",
"is",
"used",
"as",
"the",
"section",
"title",
"the",
"comments",
"within",
"the",
"function",
"body",
"are",
"parsed",
"and",
"added",
"as",
"the",
"section",
"text",
".",
"The",
"function",
"should",
"return",
"an",
"ordered",
"dict",
"of",
"figures",
"and",
"tables",
"that",
"are",
"then",
"attached",
"to",
"the",
"section",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L289-L316
|
242,455
|
dicaso/leopard
|
leopard/__init__.py
|
Report.list
|
def list(self):
"""
Get an overview of the report content list
"""
for i in range(len(self.sections)):
self.sections[i].list(walkTrace=(i+1,))
|
python
|
def list(self):
"""
Get an overview of the report content list
"""
for i in range(len(self.sections)):
self.sections[i].list(walkTrace=(i+1,))
|
[
"def",
"list",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"sections",
")",
")",
":",
"self",
".",
"sections",
"[",
"i",
"]",
".",
"list",
"(",
"walkTrace",
"=",
"(",
"i",
"+",
"1",
",",
")",
")"
] |
Get an overview of the report content list
|
[
"Get",
"an",
"overview",
"of",
"the",
"report",
"content",
"list"
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L357-L362
|
242,456
|
dicaso/leopard
|
leopard/__init__.py
|
Report.outputZip
|
def outputZip(self,figtype='png'):
"""
Outputs the report in a zip container.
Figs and tabs as pngs and excells.
Args:
figtype (str): Figure type of images in the zip folder.
"""
from zipfile import ZipFile
with ZipFile(self.outfile+'.zip', 'w') as zipcontainer:
zipcontainer.writestr(
'summary.txt',
'# {}\n\n{}\n{}'.format(
self.title,
self.p,
('\n## Conclusion\n' if self.conclusion else '')+self.conclusion
).encode()
)
c = count(1)
for section in self.sections:
section.sectionOutZip(zipcontainer,'s{}_{}/'.format(next(c),section.title.replace(' ','_')),
figtype=figtype)
|
python
|
def outputZip(self,figtype='png'):
"""
Outputs the report in a zip container.
Figs and tabs as pngs and excells.
Args:
figtype (str): Figure type of images in the zip folder.
"""
from zipfile import ZipFile
with ZipFile(self.outfile+'.zip', 'w') as zipcontainer:
zipcontainer.writestr(
'summary.txt',
'# {}\n\n{}\n{}'.format(
self.title,
self.p,
('\n## Conclusion\n' if self.conclusion else '')+self.conclusion
).encode()
)
c = count(1)
for section in self.sections:
section.sectionOutZip(zipcontainer,'s{}_{}/'.format(next(c),section.title.replace(' ','_')),
figtype=figtype)
|
[
"def",
"outputZip",
"(",
"self",
",",
"figtype",
"=",
"'png'",
")",
":",
"from",
"zipfile",
"import",
"ZipFile",
"with",
"ZipFile",
"(",
"self",
".",
"outfile",
"+",
"'.zip'",
",",
"'w'",
")",
"as",
"zipcontainer",
":",
"zipcontainer",
".",
"writestr",
"(",
"'summary.txt'",
",",
"'# {}\\n\\n{}\\n{}'",
".",
"format",
"(",
"self",
".",
"title",
",",
"self",
".",
"p",
",",
"(",
"'\\n## Conclusion\\n'",
"if",
"self",
".",
"conclusion",
"else",
"''",
")",
"+",
"self",
".",
"conclusion",
")",
".",
"encode",
"(",
")",
")",
"c",
"=",
"count",
"(",
"1",
")",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"section",
".",
"sectionOutZip",
"(",
"zipcontainer",
",",
"'s{}_{}/'",
".",
"format",
"(",
"next",
"(",
"c",
")",
",",
"section",
".",
"title",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
",",
"figtype",
"=",
"figtype",
")"
] |
Outputs the report in a zip container.
Figs and tabs as pngs and excells.
Args:
figtype (str): Figure type of images in the zip folder.
|
[
"Outputs",
"the",
"report",
"in",
"a",
"zip",
"container",
".",
"Figs",
"and",
"tabs",
"as",
"pngs",
"and",
"excells",
"."
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L364-L385
|
242,457
|
dicaso/leopard
|
leopard/__init__.py
|
Report.outputWord
|
def outputWord(self):
"""Output report to word docx
"""
import docx
from docx.enum.text import WD_ALIGN_PARAGRAPH
doc = docx.Document()
doc.styles['Normal'].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
doc.add_heading(self.title, level=0)
if self.addTime:
from time import localtime, strftime
doc.add_heading(strftime("%Y-%m-%d %H:%M:%S", localtime()), level=1)
# Append introduction
if self.p:
doc.add_heading('Introduction',level=1)
for p in renewliner(self.p).split('\n'):
doc.add_paragraph(p)
# Sections
c = count(1)
#Prepare fig and table numbers
self.listFigures(tuple())
self.listTables(tuple())
for section in self.sections:
section.sectionsWord((next(c),),doc=doc)
# Append conclusion
if self.conclusion:
doc.add_heading('Conclusion', level=1)
for p in renewliner(self.conclusion).split('\n'):
doc.add_paragraph(p)
# Generate Word document
doc.save(self.outfile+'.docx')
|
python
|
def outputWord(self):
"""Output report to word docx
"""
import docx
from docx.enum.text import WD_ALIGN_PARAGRAPH
doc = docx.Document()
doc.styles['Normal'].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
doc.add_heading(self.title, level=0)
if self.addTime:
from time import localtime, strftime
doc.add_heading(strftime("%Y-%m-%d %H:%M:%S", localtime()), level=1)
# Append introduction
if self.p:
doc.add_heading('Introduction',level=1)
for p in renewliner(self.p).split('\n'):
doc.add_paragraph(p)
# Sections
c = count(1)
#Prepare fig and table numbers
self.listFigures(tuple())
self.listTables(tuple())
for section in self.sections:
section.sectionsWord((next(c),),doc=doc)
# Append conclusion
if self.conclusion:
doc.add_heading('Conclusion', level=1)
for p in renewliner(self.conclusion).split('\n'):
doc.add_paragraph(p)
# Generate Word document
doc.save(self.outfile+'.docx')
|
[
"def",
"outputWord",
"(",
"self",
")",
":",
"import",
"docx",
"from",
"docx",
".",
"enum",
".",
"text",
"import",
"WD_ALIGN_PARAGRAPH",
"doc",
"=",
"docx",
".",
"Document",
"(",
")",
"doc",
".",
"styles",
"[",
"'Normal'",
"]",
".",
"paragraph_format",
".",
"alignment",
"=",
"WD_ALIGN_PARAGRAPH",
".",
"JUSTIFY",
"doc",
".",
"add_heading",
"(",
"self",
".",
"title",
",",
"level",
"=",
"0",
")",
"if",
"self",
".",
"addTime",
":",
"from",
"time",
"import",
"localtime",
",",
"strftime",
"doc",
".",
"add_heading",
"(",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
",",
"localtime",
"(",
")",
")",
",",
"level",
"=",
"1",
")",
"# Append introduction",
"if",
"self",
".",
"p",
":",
"doc",
".",
"add_heading",
"(",
"'Introduction'",
",",
"level",
"=",
"1",
")",
"for",
"p",
"in",
"renewliner",
"(",
"self",
".",
"p",
")",
".",
"split",
"(",
"'\\n'",
")",
":",
"doc",
".",
"add_paragraph",
"(",
"p",
")",
"# Sections",
"c",
"=",
"count",
"(",
"1",
")",
"#Prepare fig and table numbers",
"self",
".",
"listFigures",
"(",
"tuple",
"(",
")",
")",
"self",
".",
"listTables",
"(",
"tuple",
"(",
")",
")",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"section",
".",
"sectionsWord",
"(",
"(",
"next",
"(",
"c",
")",
",",
")",
",",
"doc",
"=",
"doc",
")",
"# Append conclusion",
"if",
"self",
".",
"conclusion",
":",
"doc",
".",
"add_heading",
"(",
"'Conclusion'",
",",
"level",
"=",
"1",
")",
"for",
"p",
"in",
"renewliner",
"(",
"self",
".",
"conclusion",
")",
".",
"split",
"(",
"'\\n'",
")",
":",
"doc",
".",
"add_paragraph",
"(",
"p",
")",
"# Generate Word document",
"doc",
".",
"save",
"(",
"self",
".",
"outfile",
"+",
"'.docx'",
")"
] |
Output report to word docx
|
[
"Output",
"report",
"to",
"word",
"docx"
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L424-L459
|
242,458
|
dicaso/leopard
|
leopard/__init__.py
|
Report.getReportTable
|
def getReportTable(reportzipfile,tablefilename,inReportsDir=True,verbose=False):
"""Get a pandas table from a previous report
Args:
reportzipfile (str): Zip folder location, '.zip' extension is optional.
tablefilename (str or list int): Table location within the zip folder.
Can be provided as the filename within the zip folder, or a list of integers
indicating its exact position (1-indexed). If you provide an empty string or
list, all available table filenames in the zip folder will be printed.
inReportsDir (bool): Search reportzipfile relative to reportsDir.
Returns:
pd.DataFrame
"""
import zipfile, io, re
# zipfilename preparation
if not reportzipfile.endswith('.zip'): reportzipfile+='.zip'
if inReportsDir: reportzipfile = os.path.join(reportsDir,reportzipfile)
with zipfile.ZipFile(reportzipfile) as z:
# print all table filenames if tablefilename is not provided
if not tablefilename:
for f in z.filelist:
if 'table' in f.filename: print(f.filename)
return
# tablefilename preparation if int list
if isinstance(tablefilename,list):
tablelocation = tablefilename
tablefilename = None
location = re.compile(r'(s|table)(\d+)_')
for f in z.filelist:
if 'table' not in f.filename or f.filename.count('/') != (len(tablelocation)-1): continue
if [int(location.match(s).groups()[1]) for s in f.filename.split('/')] == tablelocation:
tablefilename = f.filename
if verbose: print('Loading',tablefilename)
break
if tablefilename is None: raise FileNotFoundError('Table location not found in zip folder.')
# read table
with z.open(tablefilename) as f:
ft = io.TextIOWrapper(f)
return pd.read_csv(ft,index_col=0,sep=csvsep,decimal=csvdec)
|
python
|
def getReportTable(reportzipfile,tablefilename,inReportsDir=True,verbose=False):
"""Get a pandas table from a previous report
Args:
reportzipfile (str): Zip folder location, '.zip' extension is optional.
tablefilename (str or list int): Table location within the zip folder.
Can be provided as the filename within the zip folder, or a list of integers
indicating its exact position (1-indexed). If you provide an empty string or
list, all available table filenames in the zip folder will be printed.
inReportsDir (bool): Search reportzipfile relative to reportsDir.
Returns:
pd.DataFrame
"""
import zipfile, io, re
# zipfilename preparation
if not reportzipfile.endswith('.zip'): reportzipfile+='.zip'
if inReportsDir: reportzipfile = os.path.join(reportsDir,reportzipfile)
with zipfile.ZipFile(reportzipfile) as z:
# print all table filenames if tablefilename is not provided
if not tablefilename:
for f in z.filelist:
if 'table' in f.filename: print(f.filename)
return
# tablefilename preparation if int list
if isinstance(tablefilename,list):
tablelocation = tablefilename
tablefilename = None
location = re.compile(r'(s|table)(\d+)_')
for f in z.filelist:
if 'table' not in f.filename or f.filename.count('/') != (len(tablelocation)-1): continue
if [int(location.match(s).groups()[1]) for s in f.filename.split('/')] == tablelocation:
tablefilename = f.filename
if verbose: print('Loading',tablefilename)
break
if tablefilename is None: raise FileNotFoundError('Table location not found in zip folder.')
# read table
with z.open(tablefilename) as f:
ft = io.TextIOWrapper(f)
return pd.read_csv(ft,index_col=0,sep=csvsep,decimal=csvdec)
|
[
"def",
"getReportTable",
"(",
"reportzipfile",
",",
"tablefilename",
",",
"inReportsDir",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"import",
"zipfile",
",",
"io",
",",
"re",
"# zipfilename preparation",
"if",
"not",
"reportzipfile",
".",
"endswith",
"(",
"'.zip'",
")",
":",
"reportzipfile",
"+=",
"'.zip'",
"if",
"inReportsDir",
":",
"reportzipfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"reportsDir",
",",
"reportzipfile",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"reportzipfile",
")",
"as",
"z",
":",
"# print all table filenames if tablefilename is not provided",
"if",
"not",
"tablefilename",
":",
"for",
"f",
"in",
"z",
".",
"filelist",
":",
"if",
"'table'",
"in",
"f",
".",
"filename",
":",
"print",
"(",
"f",
".",
"filename",
")",
"return",
"# tablefilename preparation if int list",
"if",
"isinstance",
"(",
"tablefilename",
",",
"list",
")",
":",
"tablelocation",
"=",
"tablefilename",
"tablefilename",
"=",
"None",
"location",
"=",
"re",
".",
"compile",
"(",
"r'(s|table)(\\d+)_'",
")",
"for",
"f",
"in",
"z",
".",
"filelist",
":",
"if",
"'table'",
"not",
"in",
"f",
".",
"filename",
"or",
"f",
".",
"filename",
".",
"count",
"(",
"'/'",
")",
"!=",
"(",
"len",
"(",
"tablelocation",
")",
"-",
"1",
")",
":",
"continue",
"if",
"[",
"int",
"(",
"location",
".",
"match",
"(",
"s",
")",
".",
"groups",
"(",
")",
"[",
"1",
"]",
")",
"for",
"s",
"in",
"f",
".",
"filename",
".",
"split",
"(",
"'/'",
")",
"]",
"==",
"tablelocation",
":",
"tablefilename",
"=",
"f",
".",
"filename",
"if",
"verbose",
":",
"print",
"(",
"'Loading'",
",",
"tablefilename",
")",
"break",
"if",
"tablefilename",
"is",
"None",
":",
"raise",
"FileNotFoundError",
"(",
"'Table location not found in zip folder.'",
")",
"# read table",
"with",
"z",
".",
"open",
"(",
"tablefilename",
")",
"as",
"f",
":",
"ft",
"=",
"io",
".",
"TextIOWrapper",
"(",
"f",
")",
"return",
"pd",
".",
"read_csv",
"(",
"ft",
",",
"index_col",
"=",
"0",
",",
"sep",
"=",
"csvsep",
",",
"decimal",
"=",
"csvdec",
")"
] |
Get a pandas table from a previous report
Args:
reportzipfile (str): Zip folder location, '.zip' extension is optional.
tablefilename (str or list int): Table location within the zip folder.
Can be provided as the filename within the zip folder, or a list of integers
indicating its exact position (1-indexed). If you provide an empty string or
list, all available table filenames in the zip folder will be printed.
inReportsDir (bool): Search reportzipfile relative to reportsDir.
Returns:
pd.DataFrame
|
[
"Get",
"a",
"pandas",
"table",
"from",
"a",
"previous",
"report"
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/__init__.py#L462-L502
|
242,459
|
cdeboever3/cdpybio
|
cdpybio/general.py
|
transform_standard_normal
|
def transform_standard_normal(df):
"""Transform a series or the rows of a dataframe to the values of a standard
normal based on rank."""
import pandas as pd
import scipy.stats as stats
if type(df) == pd.core.frame.DataFrame:
gc_ranks = df.rank(axis=1)
gc_ranks = gc_ranks / (gc_ranks.shape[1] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.DataFrame(std_norm, index=gc_ranks.index,
columns=gc_ranks.columns)
elif type(df) == pd.core.series.Series:
gc_ranks = df.rank()
gc_ranks = gc_ranks / (gc_ranks.shape[0] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.Series(std_norm, index=df.index)
return std_norm
|
python
|
def transform_standard_normal(df):
"""Transform a series or the rows of a dataframe to the values of a standard
normal based on rank."""
import pandas as pd
import scipy.stats as stats
if type(df) == pd.core.frame.DataFrame:
gc_ranks = df.rank(axis=1)
gc_ranks = gc_ranks / (gc_ranks.shape[1] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.DataFrame(std_norm, index=gc_ranks.index,
columns=gc_ranks.columns)
elif type(df) == pd.core.series.Series:
gc_ranks = df.rank()
gc_ranks = gc_ranks / (gc_ranks.shape[0] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.Series(std_norm, index=df.index)
return std_norm
|
[
"def",
"transform_standard_normal",
"(",
"df",
")",
":",
"import",
"pandas",
"as",
"pd",
"import",
"scipy",
".",
"stats",
"as",
"stats",
"if",
"type",
"(",
"df",
")",
"==",
"pd",
".",
"core",
".",
"frame",
".",
"DataFrame",
":",
"gc_ranks",
"=",
"df",
".",
"rank",
"(",
"axis",
"=",
"1",
")",
"gc_ranks",
"=",
"gc_ranks",
"/",
"(",
"gc_ranks",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
"std_norm",
"=",
"stats",
".",
"norm",
".",
"ppf",
"(",
"gc_ranks",
")",
"std_norm",
"=",
"pd",
".",
"DataFrame",
"(",
"std_norm",
",",
"index",
"=",
"gc_ranks",
".",
"index",
",",
"columns",
"=",
"gc_ranks",
".",
"columns",
")",
"elif",
"type",
"(",
"df",
")",
"==",
"pd",
".",
"core",
".",
"series",
".",
"Series",
":",
"gc_ranks",
"=",
"df",
".",
"rank",
"(",
")",
"gc_ranks",
"=",
"gc_ranks",
"/",
"(",
"gc_ranks",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
"std_norm",
"=",
"stats",
".",
"norm",
".",
"ppf",
"(",
"gc_ranks",
")",
"std_norm",
"=",
"pd",
".",
"Series",
"(",
"std_norm",
",",
"index",
"=",
"df",
".",
"index",
")",
"return",
"std_norm"
] |
Transform a series or the rows of a dataframe to the values of a standard
normal based on rank.
|
[
"Transform",
"a",
"series",
"or",
"the",
"rows",
"of",
"a",
"dataframe",
"to",
"the",
"values",
"of",
"a",
"standard",
"normal",
"based",
"on",
"rank",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/general.py#L53-L69
|
242,460
|
cdeboever3/cdpybio
|
cdpybio/general.py
|
read_gzipped_text_url
|
def read_gzipped_text_url(url):
"""Read a gzipped text file from a URL and return
contents as a string."""
import urllib2
import zlib
from StringIO import StringIO
opener = urllib2.build_opener()
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
respond = opener.open(request)
compressedData = respond.read()
respond.close()
opener.close()
compressedDataBuf = StringIO(compressedData)
d = zlib.decompressobj(16+zlib.MAX_WBITS)
buffer = compressedDataBuf.read(1024)
#saveFile = open('/tmp/test.txt', "wb")
s = []
while buffer:
s.append(d.decompress(buffer))
buffer = compressedDataBuf.read(1024)
s = ''.join(s)
return s
|
python
|
def read_gzipped_text_url(url):
"""Read a gzipped text file from a URL and return
contents as a string."""
import urllib2
import zlib
from StringIO import StringIO
opener = urllib2.build_opener()
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
respond = opener.open(request)
compressedData = respond.read()
respond.close()
opener.close()
compressedDataBuf = StringIO(compressedData)
d = zlib.decompressobj(16+zlib.MAX_WBITS)
buffer = compressedDataBuf.read(1024)
#saveFile = open('/tmp/test.txt', "wb")
s = []
while buffer:
s.append(d.decompress(buffer))
buffer = compressedDataBuf.read(1024)
s = ''.join(s)
return s
|
[
"def",
"read_gzipped_text_url",
"(",
"url",
")",
":",
"import",
"urllib2",
"import",
"zlib",
"from",
"StringIO",
"import",
"StringIO",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
")",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
")",
"request",
".",
"add_header",
"(",
"'Accept-encoding'",
",",
"'gzip'",
")",
"respond",
"=",
"opener",
".",
"open",
"(",
"request",
")",
"compressedData",
"=",
"respond",
".",
"read",
"(",
")",
"respond",
".",
"close",
"(",
")",
"opener",
".",
"close",
"(",
")",
"compressedDataBuf",
"=",
"StringIO",
"(",
"compressedData",
")",
"d",
"=",
"zlib",
".",
"decompressobj",
"(",
"16",
"+",
"zlib",
".",
"MAX_WBITS",
")",
"buffer",
"=",
"compressedDataBuf",
".",
"read",
"(",
"1024",
")",
"#saveFile = open('/tmp/test.txt', \"wb\")",
"s",
"=",
"[",
"]",
"while",
"buffer",
":",
"s",
".",
"append",
"(",
"d",
".",
"decompress",
"(",
"buffer",
")",
")",
"buffer",
"=",
"compressedDataBuf",
".",
"read",
"(",
"1024",
")",
"s",
"=",
"''",
".",
"join",
"(",
"s",
")",
"return",
"s"
] |
Read a gzipped text file from a URL and return
contents as a string.
|
[
"Read",
"a",
"gzipped",
"text",
"file",
"from",
"a",
"URL",
"and",
"return",
"contents",
"as",
"a",
"string",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/general.py#L71-L94
|
242,461
|
OpenGov/carpenter
|
carpenter/blocks/flagable.py
|
Flagable.get_worst_flag_level
|
def get_worst_flag_level(self, flags):
'''
Determines the worst flag present in the provided flags. If no
flags are given then a 'minor' value is returned.
'''
worst_flag_level = 0
for flag_level_name in flags:
flag_level = self.FLAG_LEVELS[flag_level_name]
if flag_level > worst_flag_level:
worst_flag_level = flag_level
return self.FLAG_LEVEL_CODES[worst_flag_level]
|
python
|
def get_worst_flag_level(self, flags):
'''
Determines the worst flag present in the provided flags. If no
flags are given then a 'minor' value is returned.
'''
worst_flag_level = 0
for flag_level_name in flags:
flag_level = self.FLAG_LEVELS[flag_level_name]
if flag_level > worst_flag_level:
worst_flag_level = flag_level
return self.FLAG_LEVEL_CODES[worst_flag_level]
|
[
"def",
"get_worst_flag_level",
"(",
"self",
",",
"flags",
")",
":",
"worst_flag_level",
"=",
"0",
"for",
"flag_level_name",
"in",
"flags",
":",
"flag_level",
"=",
"self",
".",
"FLAG_LEVELS",
"[",
"flag_level_name",
"]",
"if",
"flag_level",
">",
"worst_flag_level",
":",
"worst_flag_level",
"=",
"flag_level",
"return",
"self",
".",
"FLAG_LEVEL_CODES",
"[",
"worst_flag_level",
"]"
] |
Determines the worst flag present in the provided flags. If no
flags are given then a 'minor' value is returned.
|
[
"Determines",
"the",
"worst",
"flag",
"present",
"in",
"the",
"provided",
"flags",
".",
"If",
"no",
"flags",
"are",
"given",
"then",
"a",
"minor",
"value",
"is",
"returned",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/flagable.py#L76-L86
|
242,462
|
skitazaki/python-clitool
|
clitool/accesslog.py
|
parse
|
def parse(line):
""" Parse accesslog line to map Python dictionary.
Returned dictionary has following keys:
- time: access time (datetime; naive)
- utcoffset: UTC offset of access time (timedelta)
- host: remote IP address.
- path: HTTP request path, this will be splitted from query.
- query: HTTP requert query string removed from "?".
- method: HTTP request method.
- protocol: HTTP request version.
- status: HTTP response status code. (int)
- size: HTTP response size, if available. (int)
- referer: Referer header. if "-" is given, that will be ignored.
- ua: User agent. if "-" is given, that will be ignored.
- ident: remote logname
- user: remote user
- trailing: Additional information if using custom log format.
You can use "utcoffset" with `dateutil.tz.tzoffset` like followings:
>>> from dateutil.tz import tzoffset
>>> e = parse(line)
>>> tz = tzoffset(None, e['utcoffset'].total_seconds())
>>> t = e['time'].replace(tzinfo=tz)
:param line: one line of access log combined format
:type line: string
:rtype: dict
"""
m = LOG_FORMAT.match(line)
if m is None:
return
access = Access._make(m.groups())
entry = {
'host': access.host,
'path': access.path,
'query': access.query,
'method': access.method,
'protocol': access.protocol,
'status': int(access.status)
}
entry['time'] = datetime.datetime(
int(access.year), MONTH_ABBR[access.month], int(access.day),
int(access.hour), int(access.minute), int(access.second))
# Parse timezone string; "+YYMM" format.
entry['utcoffset'] = (1 if access.timezone[0] == '+' else -1) * \
datetime.timedelta(hours=int(access.timezone[1:3]),
minutes=int(access.timezone[3:5]))
if access.ident != '-':
entry['ident'] = access.ident
if access.user != '-':
entry['user'] = access.user
if access.size != '-':
entry['size'] = int(access.size)
if access.referer != '-':
entry['referer'] = access.referer
if access.ua != '-':
entry['ua'] = access.ua
if access.trailing:
entry['trailing'] = access.trailing.strip()
return entry
|
python
|
def parse(line):
""" Parse accesslog line to map Python dictionary.
Returned dictionary has following keys:
- time: access time (datetime; naive)
- utcoffset: UTC offset of access time (timedelta)
- host: remote IP address.
- path: HTTP request path, this will be splitted from query.
- query: HTTP requert query string removed from "?".
- method: HTTP request method.
- protocol: HTTP request version.
- status: HTTP response status code. (int)
- size: HTTP response size, if available. (int)
- referer: Referer header. if "-" is given, that will be ignored.
- ua: User agent. if "-" is given, that will be ignored.
- ident: remote logname
- user: remote user
- trailing: Additional information if using custom log format.
You can use "utcoffset" with `dateutil.tz.tzoffset` like followings:
>>> from dateutil.tz import tzoffset
>>> e = parse(line)
>>> tz = tzoffset(None, e['utcoffset'].total_seconds())
>>> t = e['time'].replace(tzinfo=tz)
:param line: one line of access log combined format
:type line: string
:rtype: dict
"""
m = LOG_FORMAT.match(line)
if m is None:
return
access = Access._make(m.groups())
entry = {
'host': access.host,
'path': access.path,
'query': access.query,
'method': access.method,
'protocol': access.protocol,
'status': int(access.status)
}
entry['time'] = datetime.datetime(
int(access.year), MONTH_ABBR[access.month], int(access.day),
int(access.hour), int(access.minute), int(access.second))
# Parse timezone string; "+YYMM" format.
entry['utcoffset'] = (1 if access.timezone[0] == '+' else -1) * \
datetime.timedelta(hours=int(access.timezone[1:3]),
minutes=int(access.timezone[3:5]))
if access.ident != '-':
entry['ident'] = access.ident
if access.user != '-':
entry['user'] = access.user
if access.size != '-':
entry['size'] = int(access.size)
if access.referer != '-':
entry['referer'] = access.referer
if access.ua != '-':
entry['ua'] = access.ua
if access.trailing:
entry['trailing'] = access.trailing.strip()
return entry
|
[
"def",
"parse",
"(",
"line",
")",
":",
"m",
"=",
"LOG_FORMAT",
".",
"match",
"(",
"line",
")",
"if",
"m",
"is",
"None",
":",
"return",
"access",
"=",
"Access",
".",
"_make",
"(",
"m",
".",
"groups",
"(",
")",
")",
"entry",
"=",
"{",
"'host'",
":",
"access",
".",
"host",
",",
"'path'",
":",
"access",
".",
"path",
",",
"'query'",
":",
"access",
".",
"query",
",",
"'method'",
":",
"access",
".",
"method",
",",
"'protocol'",
":",
"access",
".",
"protocol",
",",
"'status'",
":",
"int",
"(",
"access",
".",
"status",
")",
"}",
"entry",
"[",
"'time'",
"]",
"=",
"datetime",
".",
"datetime",
"(",
"int",
"(",
"access",
".",
"year",
")",
",",
"MONTH_ABBR",
"[",
"access",
".",
"month",
"]",
",",
"int",
"(",
"access",
".",
"day",
")",
",",
"int",
"(",
"access",
".",
"hour",
")",
",",
"int",
"(",
"access",
".",
"minute",
")",
",",
"int",
"(",
"access",
".",
"second",
")",
")",
"# Parse timezone string; \"+YYMM\" format.",
"entry",
"[",
"'utcoffset'",
"]",
"=",
"(",
"1",
"if",
"access",
".",
"timezone",
"[",
"0",
"]",
"==",
"'+'",
"else",
"-",
"1",
")",
"*",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"int",
"(",
"access",
".",
"timezone",
"[",
"1",
":",
"3",
"]",
")",
",",
"minutes",
"=",
"int",
"(",
"access",
".",
"timezone",
"[",
"3",
":",
"5",
"]",
")",
")",
"if",
"access",
".",
"ident",
"!=",
"'-'",
":",
"entry",
"[",
"'ident'",
"]",
"=",
"access",
".",
"ident",
"if",
"access",
".",
"user",
"!=",
"'-'",
":",
"entry",
"[",
"'user'",
"]",
"=",
"access",
".",
"user",
"if",
"access",
".",
"size",
"!=",
"'-'",
":",
"entry",
"[",
"'size'",
"]",
"=",
"int",
"(",
"access",
".",
"size",
")",
"if",
"access",
".",
"referer",
"!=",
"'-'",
":",
"entry",
"[",
"'referer'",
"]",
"=",
"access",
".",
"referer",
"if",
"access",
".",
"ua",
"!=",
"'-'",
":",
"entry",
"[",
"'ua'",
"]",
"=",
"access",
".",
"ua",
"if",
"access",
".",
"trailing",
":",
"entry",
"[",
"'trailing'",
"]",
"=",
"access",
".",
"trailing",
".",
"strip",
"(",
")",
"return",
"entry"
] |
Parse accesslog line to map Python dictionary.
Returned dictionary has following keys:
- time: access time (datetime; naive)
- utcoffset: UTC offset of access time (timedelta)
- host: remote IP address.
- path: HTTP request path, this will be splitted from query.
- query: HTTP requert query string removed from "?".
- method: HTTP request method.
- protocol: HTTP request version.
- status: HTTP response status code. (int)
- size: HTTP response size, if available. (int)
- referer: Referer header. if "-" is given, that will be ignored.
- ua: User agent. if "-" is given, that will be ignored.
- ident: remote logname
- user: remote user
- trailing: Additional information if using custom log format.
You can use "utcoffset" with `dateutil.tz.tzoffset` like followings:
>>> from dateutil.tz import tzoffset
>>> e = parse(line)
>>> tz = tzoffset(None, e['utcoffset'].total_seconds())
>>> t = e['time'].replace(tzinfo=tz)
:param line: one line of access log combined format
:type line: string
:rtype: dict
|
[
"Parse",
"accesslog",
"line",
"to",
"map",
"Python",
"dictionary",
"."
] |
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/accesslog.py#L60-L122
|
242,463
|
skitazaki/python-clitool
|
clitool/accesslog.py
|
logparse
|
def logparse(*args, **kwargs):
""" Parse access log on the terminal application.
If list of files are given, parse each file. Otherwise, parse standard
input.
:param args: supporting functions after processed raw log line
:type: list of callables
:rtype: tuple of (statistics, key/value report)
"""
from clitool.cli import clistream
from clitool.processor import SimpleDictReporter
lst = [parse] + args
reporter = SimpleDictReporter()
stats = clistream(reporter, *lst, **kwargs)
return stats, reporter.report()
|
python
|
def logparse(*args, **kwargs):
""" Parse access log on the terminal application.
If list of files are given, parse each file. Otherwise, parse standard
input.
:param args: supporting functions after processed raw log line
:type: list of callables
:rtype: tuple of (statistics, key/value report)
"""
from clitool.cli import clistream
from clitool.processor import SimpleDictReporter
lst = [parse] + args
reporter = SimpleDictReporter()
stats = clistream(reporter, *lst, **kwargs)
return stats, reporter.report()
|
[
"def",
"logparse",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"clitool",
".",
"cli",
"import",
"clistream",
"from",
"clitool",
".",
"processor",
"import",
"SimpleDictReporter",
"lst",
"=",
"[",
"parse",
"]",
"+",
"args",
"reporter",
"=",
"SimpleDictReporter",
"(",
")",
"stats",
"=",
"clistream",
"(",
"reporter",
",",
"*",
"lst",
",",
"*",
"*",
"kwargs",
")",
"return",
"stats",
",",
"reporter",
".",
"report",
"(",
")"
] |
Parse access log on the terminal application.
If list of files are given, parse each file. Otherwise, parse standard
input.
:param args: supporting functions after processed raw log line
:type: list of callables
:rtype: tuple of (statistics, key/value report)
|
[
"Parse",
"access",
"log",
"on",
"the",
"terminal",
"application",
".",
"If",
"list",
"of",
"files",
"are",
"given",
"parse",
"each",
"file",
".",
"Otherwise",
"parse",
"standard",
"input",
"."
] |
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/accesslog.py#L169-L184
|
242,464
|
MacHu-GWU/angora-project
|
angora/math/outlier.py
|
std_filter
|
def std_filter(array, n_std=2.0, return_index=False):
"""Standard deviation outlier detector.
:param array: array of data.
:param n_std: default 2.0, exclude data out of ``n_std`` standard deviation.
:param return_index: boolean, default False, if True, only returns index.
"""
if not isinstance(array, np.ndarray):
array = np.array(array)
mean, std = array.mean(), array.std()
good_index = np.where(abs(array - mean) <= n_std * std)
bad_index = np.where(abs(array - mean) > n_std * std)
if return_index:
return good_index[0], bad_index[0]
else:
return array[good_index], array[bad_index]
|
python
|
def std_filter(array, n_std=2.0, return_index=False):
"""Standard deviation outlier detector.
:param array: array of data.
:param n_std: default 2.0, exclude data out of ``n_std`` standard deviation.
:param return_index: boolean, default False, if True, only returns index.
"""
if not isinstance(array, np.ndarray):
array = np.array(array)
mean, std = array.mean(), array.std()
good_index = np.where(abs(array - mean) <= n_std * std)
bad_index = np.where(abs(array - mean) > n_std * std)
if return_index:
return good_index[0], bad_index[0]
else:
return array[good_index], array[bad_index]
|
[
"def",
"std_filter",
"(",
"array",
",",
"n_std",
"=",
"2.0",
",",
"return_index",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"np",
".",
"ndarray",
")",
":",
"array",
"=",
"np",
".",
"array",
"(",
"array",
")",
"mean",
",",
"std",
"=",
"array",
".",
"mean",
"(",
")",
",",
"array",
".",
"std",
"(",
")",
"good_index",
"=",
"np",
".",
"where",
"(",
"abs",
"(",
"array",
"-",
"mean",
")",
"<=",
"n_std",
"*",
"std",
")",
"bad_index",
"=",
"np",
".",
"where",
"(",
"abs",
"(",
"array",
"-",
"mean",
")",
">",
"n_std",
"*",
"std",
")",
"if",
"return_index",
":",
"return",
"good_index",
"[",
"0",
"]",
",",
"bad_index",
"[",
"0",
"]",
"else",
":",
"return",
"array",
"[",
"good_index",
"]",
",",
"array",
"[",
"bad_index",
"]"
] |
Standard deviation outlier detector.
:param array: array of data.
:param n_std: default 2.0, exclude data out of ``n_std`` standard deviation.
:param return_index: boolean, default False, if True, only returns index.
|
[
"Standard",
"deviation",
"outlier",
"detector",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/math/outlier.py#L47-L64
|
242,465
|
MacHu-GWU/angora-project
|
angora/math/outlier.py
|
box_filter
|
def box_filter(array, n_iqr=1.5, return_index=False):
"""Box plot outlier detector.
:param array: array of data.
:param n_std: default 1.5, exclude data out of ``n_iqr`` IQR.
:param return_index: boolean, default False, if True, only returns index.
"""
if not isinstance(array, np.ndarray):
array = np.array(array)
Q3 = np.percentile(array, 75)
Q1 = np.percentile(array, 25)
IQR = Q3 - Q1
lower, upper = Q1 - n_iqr * IQR, Q3 + n_iqr * IQR
good_index = np.where(np.logical_and(array >= lower, array <= upper))
bad_index = np.where(np.logical_or(array < lower, array > upper))
if return_index:
return good_index[0], bad_index[0]
else:
return array[good_index], array[bad_index]
|
python
|
def box_filter(array, n_iqr=1.5, return_index=False):
"""Box plot outlier detector.
:param array: array of data.
:param n_std: default 1.5, exclude data out of ``n_iqr`` IQR.
:param return_index: boolean, default False, if True, only returns index.
"""
if not isinstance(array, np.ndarray):
array = np.array(array)
Q3 = np.percentile(array, 75)
Q1 = np.percentile(array, 25)
IQR = Q3 - Q1
lower, upper = Q1 - n_iqr * IQR, Q3 + n_iqr * IQR
good_index = np.where(np.logical_and(array >= lower, array <= upper))
bad_index = np.where(np.logical_or(array < lower, array > upper))
if return_index:
return good_index[0], bad_index[0]
else:
return array[good_index], array[bad_index]
|
[
"def",
"box_filter",
"(",
"array",
",",
"n_iqr",
"=",
"1.5",
",",
"return_index",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"np",
".",
"ndarray",
")",
":",
"array",
"=",
"np",
".",
"array",
"(",
"array",
")",
"Q3",
"=",
"np",
".",
"percentile",
"(",
"array",
",",
"75",
")",
"Q1",
"=",
"np",
".",
"percentile",
"(",
"array",
",",
"25",
")",
"IQR",
"=",
"Q3",
"-",
"Q1",
"lower",
",",
"upper",
"=",
"Q1",
"-",
"n_iqr",
"*",
"IQR",
",",
"Q3",
"+",
"n_iqr",
"*",
"IQR",
"good_index",
"=",
"np",
".",
"where",
"(",
"np",
".",
"logical_and",
"(",
"array",
">=",
"lower",
",",
"array",
"<=",
"upper",
")",
")",
"bad_index",
"=",
"np",
".",
"where",
"(",
"np",
".",
"logical_or",
"(",
"array",
"<",
"lower",
",",
"array",
">",
"upper",
")",
")",
"if",
"return_index",
":",
"return",
"good_index",
"[",
"0",
"]",
",",
"bad_index",
"[",
"0",
"]",
"else",
":",
"return",
"array",
"[",
"good_index",
"]",
",",
"array",
"[",
"bad_index",
"]"
] |
Box plot outlier detector.
:param array: array of data.
:param n_std: default 1.5, exclude data out of ``n_iqr`` IQR.
:param return_index: boolean, default False, if True, only returns index.
|
[
"Box",
"plot",
"outlier",
"detector",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/math/outlier.py#L67-L87
|
242,466
|
esterhui/pypu
|
pypu/pusher_utils.py
|
resize_compute_width_height
|
def resize_compute_width_height(fullfile,_megapixels):
"""Given image file and desired megapixels,
computes the new width and height"""
img = Image.open(fullfile)
width,height=img.size
current_megapixels=width*height/(2.0**20)
scale=sqrt(_megapixels/float(current_megapixels))
logger.debug('A resize scale would be %f'%(scale))
# Can't make bigger, return original
if scale>= 1.0:
logger.warning('Image is %0.1f MP, trying to scale to %0.1f MP - just using original!',
current_megapixels,_megapixels);
return width,height
new_width=int(width*scale)
new_height=int(height*scale)
return new_width,new_height
|
python
|
def resize_compute_width_height(fullfile,_megapixels):
"""Given image file and desired megapixels,
computes the new width and height"""
img = Image.open(fullfile)
width,height=img.size
current_megapixels=width*height/(2.0**20)
scale=sqrt(_megapixels/float(current_megapixels))
logger.debug('A resize scale would be %f'%(scale))
# Can't make bigger, return original
if scale>= 1.0:
logger.warning('Image is %0.1f MP, trying to scale to %0.1f MP - just using original!',
current_megapixels,_megapixels);
return width,height
new_width=int(width*scale)
new_height=int(height*scale)
return new_width,new_height
|
[
"def",
"resize_compute_width_height",
"(",
"fullfile",
",",
"_megapixels",
")",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"fullfile",
")",
"width",
",",
"height",
"=",
"img",
".",
"size",
"current_megapixels",
"=",
"width",
"*",
"height",
"/",
"(",
"2.0",
"**",
"20",
")",
"scale",
"=",
"sqrt",
"(",
"_megapixels",
"/",
"float",
"(",
"current_megapixels",
")",
")",
"logger",
".",
"debug",
"(",
"'A resize scale would be %f'",
"%",
"(",
"scale",
")",
")",
"# Can't make bigger, return original",
"if",
"scale",
">=",
"1.0",
":",
"logger",
".",
"warning",
"(",
"'Image is %0.1f MP, trying to scale to %0.1f MP - just using original!'",
",",
"current_megapixels",
",",
"_megapixels",
")",
"return",
"width",
",",
"height",
"new_width",
"=",
"int",
"(",
"width",
"*",
"scale",
")",
"new_height",
"=",
"int",
"(",
"height",
"*",
"scale",
")",
"return",
"new_width",
",",
"new_height"
] |
Given image file and desired megapixels,
computes the new width and height
|
[
"Given",
"image",
"file",
"and",
"desired",
"megapixels",
"computes",
"the",
"new",
"width",
"and",
"height"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher_utils.py#L84-L104
|
242,467
|
esterhui/pypu
|
pypu/pusher_utils.py
|
getexif_location
|
def getexif_location(directory,fn):
"""
directory - Dir where file is located
fn - filename to check for EXIF GPS
Returns touple of lat,lon if EXIF
eg. (34.035460,-118.227885)
files contains GPS info, otherwise returns
None,None
"""
lat=None
lon=None
sign_lat=+1.0
sign_lon=+1.0
# Check if photo as geo info already
exif_tags=exifread.process_file(\
open(os.path.join(directory,fn),'rb'))
try:
d,m,s=exif_tags['GPS GPSLongitude'].values
# West is negative longitudes, change sign
if exif_tags['GPS GPSLongitudeRef'].values=='W':
sign_lon=-1.0
lon=float(d.num) +float(m.num)/60.0 +float(s.num/float(s.den))/3600.0
lon=lon*sign_lon
d,m,s=exif_tags['GPS GPSLatitude'].values
# South is negative latitude, change sign
if exif_tags['GPS GPSLatitudeRef'].values=='S':
sign_lat=-1.0
lat=float(d.num)\
+float(m.num)/60.0\
+float(s.num/float(s.den))/3600.0
lat=lat*sign_lat
except:
logger.debug("%s - Couldn't extract GPS info"%(fn))
return lat,lon
|
python
|
def getexif_location(directory,fn):
"""
directory - Dir where file is located
fn - filename to check for EXIF GPS
Returns touple of lat,lon if EXIF
eg. (34.035460,-118.227885)
files contains GPS info, otherwise returns
None,None
"""
lat=None
lon=None
sign_lat=+1.0
sign_lon=+1.0
# Check if photo as geo info already
exif_tags=exifread.process_file(\
open(os.path.join(directory,fn),'rb'))
try:
d,m,s=exif_tags['GPS GPSLongitude'].values
# West is negative longitudes, change sign
if exif_tags['GPS GPSLongitudeRef'].values=='W':
sign_lon=-1.0
lon=float(d.num) +float(m.num)/60.0 +float(s.num/float(s.den))/3600.0
lon=lon*sign_lon
d,m,s=exif_tags['GPS GPSLatitude'].values
# South is negative latitude, change sign
if exif_tags['GPS GPSLatitudeRef'].values=='S':
sign_lat=-1.0
lat=float(d.num)\
+float(m.num)/60.0\
+float(s.num/float(s.den))/3600.0
lat=lat*sign_lat
except:
logger.debug("%s - Couldn't extract GPS info"%(fn))
return lat,lon
|
[
"def",
"getexif_location",
"(",
"directory",
",",
"fn",
")",
":",
"lat",
"=",
"None",
"lon",
"=",
"None",
"sign_lat",
"=",
"+",
"1.0",
"sign_lon",
"=",
"+",
"1.0",
"# Check if photo as geo info already",
"exif_tags",
"=",
"exifread",
".",
"process_file",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"fn",
")",
",",
"'rb'",
")",
")",
"try",
":",
"d",
",",
"m",
",",
"s",
"=",
"exif_tags",
"[",
"'GPS GPSLongitude'",
"]",
".",
"values",
"# West is negative longitudes, change sign",
"if",
"exif_tags",
"[",
"'GPS GPSLongitudeRef'",
"]",
".",
"values",
"==",
"'W'",
":",
"sign_lon",
"=",
"-",
"1.0",
"lon",
"=",
"float",
"(",
"d",
".",
"num",
")",
"+",
"float",
"(",
"m",
".",
"num",
")",
"/",
"60.0",
"+",
"float",
"(",
"s",
".",
"num",
"/",
"float",
"(",
"s",
".",
"den",
")",
")",
"/",
"3600.0",
"lon",
"=",
"lon",
"*",
"sign_lon",
"d",
",",
"m",
",",
"s",
"=",
"exif_tags",
"[",
"'GPS GPSLatitude'",
"]",
".",
"values",
"# South is negative latitude, change sign",
"if",
"exif_tags",
"[",
"'GPS GPSLatitudeRef'",
"]",
".",
"values",
"==",
"'S'",
":",
"sign_lat",
"=",
"-",
"1.0",
"lat",
"=",
"float",
"(",
"d",
".",
"num",
")",
"+",
"float",
"(",
"m",
".",
"num",
")",
"/",
"60.0",
"+",
"float",
"(",
"s",
".",
"num",
"/",
"float",
"(",
"s",
".",
"den",
")",
")",
"/",
"3600.0",
"lat",
"=",
"lat",
"*",
"sign_lat",
"except",
":",
"logger",
".",
"debug",
"(",
"\"%s - Couldn't extract GPS info\"",
"%",
"(",
"fn",
")",
")",
"return",
"lat",
",",
"lon"
] |
directory - Dir where file is located
fn - filename to check for EXIF GPS
Returns touple of lat,lon if EXIF
eg. (34.035460,-118.227885)
files contains GPS info, otherwise returns
None,None
|
[
"directory",
"-",
"Dir",
"where",
"file",
"is",
"located",
"fn",
"-",
"filename",
"to",
"check",
"for",
"EXIF",
"GPS"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher_utils.py#L107-L143
|
242,468
|
CognitionGuidedSurgery/pyclictk
|
clictk/model.py
|
Executable.from_etree
|
def from_etree(tree):
"""Constructs an executable form a given ElementTree structure.
:param tree:
:type tree: xml.etree.ElementTree.ElementTree
:rtype: Executable
"""
exe = Executable(tree)
exe.category = tree.findtext('category')
exe.version = tree.findtext('version')
exe.title = tree.findtext('title') or exe.name
exe.description = tree.findtext('description')
exe.license = tree.findtext('license') or "unknown"
exe.contributor = tree.findtext('contributor')
for ps in tree.iterfind("parameters"):
assert isinstance(ps, ET.Element)
paras = ParameterGroup(
ps.findtext("label"),
ps.findtext("description"),
ps.attrib.get('advanced', "false") == "true",
filter(lambda x: x is not None,
map(Parameter.from_xml_node, list(ps))))
exe.parameter_groups.append(paras)
return exe
|
python
|
def from_etree(tree):
"""Constructs an executable form a given ElementTree structure.
:param tree:
:type tree: xml.etree.ElementTree.ElementTree
:rtype: Executable
"""
exe = Executable(tree)
exe.category = tree.findtext('category')
exe.version = tree.findtext('version')
exe.title = tree.findtext('title') or exe.name
exe.description = tree.findtext('description')
exe.license = tree.findtext('license') or "unknown"
exe.contributor = tree.findtext('contributor')
for ps in tree.iterfind("parameters"):
assert isinstance(ps, ET.Element)
paras = ParameterGroup(
ps.findtext("label"),
ps.findtext("description"),
ps.attrib.get('advanced', "false") == "true",
filter(lambda x: x is not None,
map(Parameter.from_xml_node, list(ps))))
exe.parameter_groups.append(paras)
return exe
|
[
"def",
"from_etree",
"(",
"tree",
")",
":",
"exe",
"=",
"Executable",
"(",
"tree",
")",
"exe",
".",
"category",
"=",
"tree",
".",
"findtext",
"(",
"'category'",
")",
"exe",
".",
"version",
"=",
"tree",
".",
"findtext",
"(",
"'version'",
")",
"exe",
".",
"title",
"=",
"tree",
".",
"findtext",
"(",
"'title'",
")",
"or",
"exe",
".",
"name",
"exe",
".",
"description",
"=",
"tree",
".",
"findtext",
"(",
"'description'",
")",
"exe",
".",
"license",
"=",
"tree",
".",
"findtext",
"(",
"'license'",
")",
"or",
"\"unknown\"",
"exe",
".",
"contributor",
"=",
"tree",
".",
"findtext",
"(",
"'contributor'",
")",
"for",
"ps",
"in",
"tree",
".",
"iterfind",
"(",
"\"parameters\"",
")",
":",
"assert",
"isinstance",
"(",
"ps",
",",
"ET",
".",
"Element",
")",
"paras",
"=",
"ParameterGroup",
"(",
"ps",
".",
"findtext",
"(",
"\"label\"",
")",
",",
"ps",
".",
"findtext",
"(",
"\"description\"",
")",
",",
"ps",
".",
"attrib",
".",
"get",
"(",
"'advanced'",
",",
"\"false\"",
")",
"==",
"\"true\"",
",",
"filter",
"(",
"lambda",
"x",
":",
"x",
"is",
"not",
"None",
",",
"map",
"(",
"Parameter",
".",
"from_xml_node",
",",
"list",
"(",
"ps",
")",
")",
")",
")",
"exe",
".",
"parameter_groups",
".",
"append",
"(",
"paras",
")",
"return",
"exe"
] |
Constructs an executable form a given ElementTree structure.
:param tree:
:type tree: xml.etree.ElementTree.ElementTree
:rtype: Executable
|
[
"Constructs",
"an",
"executable",
"form",
"a",
"given",
"ElementTree",
"structure",
"."
] |
74915098a24a33adb46d8738f9c4746d91ecc1dc
|
https://github.com/CognitionGuidedSurgery/pyclictk/blob/74915098a24a33adb46d8738f9c4746d91ecc1dc/clictk/model.py#L501-L529
|
242,469
|
edwards-lab/libGWAS
|
libgwas/snp_boundary_check.py
|
SnpBoundaryCheck.NoExclusions
|
def NoExclusions(self):
"""Determine that there are no exclusion criterion in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive.
"""
if len(self.start_bounds) + len(self.target_rs) + len(self.ignored_rs) == 0:
return BoundaryCheck.chrom == -1
return False
|
python
|
def NoExclusions(self):
"""Determine that there are no exclusion criterion in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive.
"""
if len(self.start_bounds) + len(self.target_rs) + len(self.ignored_rs) == 0:
return BoundaryCheck.chrom == -1
return False
|
[
"def",
"NoExclusions",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"start_bounds",
")",
"+",
"len",
"(",
"self",
".",
"target_rs",
")",
"+",
"len",
"(",
"self",
".",
"ignored_rs",
")",
"==",
"0",
":",
"return",
"BoundaryCheck",
".",
"chrom",
"==",
"-",
"1",
"return",
"False"
] |
Determine that there are no exclusion criterion in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive.
|
[
"Determine",
"that",
"there",
"are",
"no",
"exclusion",
"criterion",
"in",
"play"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/snp_boundary_check.py#L128-L138
|
242,470
|
mjalas/messaging-client
|
messaging_client/messaging_client.py
|
MessagingClient.set_address
|
def set_address(self, host, port):
"""Add host and port attributes"""
self.host = host
self.port = port
|
python
|
def set_address(self, host, port):
"""Add host and port attributes"""
self.host = host
self.port = port
|
[
"def",
"set_address",
"(",
"self",
",",
"host",
",",
"port",
")",
":",
"self",
".",
"host",
"=",
"host",
"self",
".",
"port",
"=",
"port"
] |
Add host and port attributes
|
[
"Add",
"host",
"and",
"port",
"attributes"
] |
b72ad622d9c94a879fe1085f0dbb52349892cd15
|
https://github.com/mjalas/messaging-client/blob/b72ad622d9c94a879fe1085f0dbb52349892cd15/messaging_client/messaging_client.py#L32-L35
|
242,471
|
mjalas/messaging-client
|
messaging_client/messaging_client.py
|
MessagingClient.connect
|
def connect(self, host=None, port=None):
"""Connects to given host address and port."""
host = self.host if host is None else host
port = self.port if port is None else port
self.socket.connect(host, port)
|
python
|
def connect(self, host=None, port=None):
"""Connects to given host address and port."""
host = self.host if host is None else host
port = self.port if port is None else port
self.socket.connect(host, port)
|
[
"def",
"connect",
"(",
"self",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"host",
"=",
"self",
".",
"host",
"if",
"host",
"is",
"None",
"else",
"host",
"port",
"=",
"self",
".",
"port",
"if",
"port",
"is",
"None",
"else",
"port",
"self",
".",
"socket",
".",
"connect",
"(",
"host",
",",
"port",
")"
] |
Connects to given host address and port.
|
[
"Connects",
"to",
"given",
"host",
"address",
"and",
"port",
"."
] |
b72ad622d9c94a879fe1085f0dbb52349892cd15
|
https://github.com/mjalas/messaging-client/blob/b72ad622d9c94a879fe1085f0dbb52349892cd15/messaging_client/messaging_client.py#L37-L41
|
242,472
|
mjalas/messaging-client
|
messaging_client/messaging_client.py
|
MessagingClient.send_file_message
|
def send_file_message(self, filename):
"""Send message inside the given file."""
data = self._readFile(filename)
self.print_debug_message(data)
self.socket.send(data)
|
python
|
def send_file_message(self, filename):
"""Send message inside the given file."""
data = self._readFile(filename)
self.print_debug_message(data)
self.socket.send(data)
|
[
"def",
"send_file_message",
"(",
"self",
",",
"filename",
")",
":",
"data",
"=",
"self",
".",
"_readFile",
"(",
"filename",
")",
"self",
".",
"print_debug_message",
"(",
"data",
")",
"self",
".",
"socket",
".",
"send",
"(",
"data",
")"
] |
Send message inside the given file.
|
[
"Send",
"message",
"inside",
"the",
"given",
"file",
"."
] |
b72ad622d9c94a879fe1085f0dbb52349892cd15
|
https://github.com/mjalas/messaging-client/blob/b72ad622d9c94a879fe1085f0dbb52349892cd15/messaging_client/messaging_client.py#L54-L58
|
242,473
|
mjalas/messaging-client
|
messaging_client/messaging_client.py
|
MessagingClient.send_message
|
def send_message(self, message):
"""Send a given message to the remote host."""
self.print_debug_message(message)
self.socket.send(message)
|
python
|
def send_message(self, message):
"""Send a given message to the remote host."""
self.print_debug_message(message)
self.socket.send(message)
|
[
"def",
"send_message",
"(",
"self",
",",
"message",
")",
":",
"self",
".",
"print_debug_message",
"(",
"message",
")",
"self",
".",
"socket",
".",
"send",
"(",
"message",
")"
] |
Send a given message to the remote host.
|
[
"Send",
"a",
"given",
"message",
"to",
"the",
"remote",
"host",
"."
] |
b72ad622d9c94a879fe1085f0dbb52349892cd15
|
https://github.com/mjalas/messaging-client/blob/b72ad622d9c94a879fe1085f0dbb52349892cd15/messaging_client/messaging_client.py#L60-L63
|
242,474
|
hsoft/xibless
|
xibless/util.py
|
modified_after
|
def modified_after(first_path, second_path):
"""Returns True if first_path's mtime is higher than second_path's mtime."""
try:
first_mtime = os.stat(first_path).st_mtime
except EnvironmentError:
return False
try:
second_mtime = os.stat(second_path).st_mtime
except EnvironmentError:
return True
return first_mtime > second_mtime
|
python
|
def modified_after(first_path, second_path):
"""Returns True if first_path's mtime is higher than second_path's mtime."""
try:
first_mtime = os.stat(first_path).st_mtime
except EnvironmentError:
return False
try:
second_mtime = os.stat(second_path).st_mtime
except EnvironmentError:
return True
return first_mtime > second_mtime
|
[
"def",
"modified_after",
"(",
"first_path",
",",
"second_path",
")",
":",
"try",
":",
"first_mtime",
"=",
"os",
".",
"stat",
"(",
"first_path",
")",
".",
"st_mtime",
"except",
"EnvironmentError",
":",
"return",
"False",
"try",
":",
"second_mtime",
"=",
"os",
".",
"stat",
"(",
"second_path",
")",
".",
"st_mtime",
"except",
"EnvironmentError",
":",
"return",
"True",
"return",
"first_mtime",
">",
"second_mtime"
] |
Returns True if first_path's mtime is higher than second_path's mtime.
|
[
"Returns",
"True",
"if",
"first_path",
"s",
"mtime",
"is",
"higher",
"than",
"second_path",
"s",
"mtime",
"."
] |
a7393d28b4a31698869b2203d4d8b3398de1de7f
|
https://github.com/hsoft/xibless/blob/a7393d28b4a31698869b2203d4d8b3398de1de7f/xibless/util.py#L3-L13
|
242,475
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._do_packet
|
def _do_packet(self, packet, ip, port):
"""
React to incoming packet
:param packet: Packet to handle
:type packet: T >= paps.si.app.message.APPMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
"""
msg_type = packet.header.message_type
if msg_type == MsgType.JOIN:
self._do_join_packet(packet, ip, port)
elif msg_type == MsgType.UNJOIN:
self._do_unjoin_packet(packet, ip, port)
elif msg_type == MsgType.UPDATE:
self._do_update_packet(packet, ip, port)
|
python
|
def _do_packet(self, packet, ip, port):
"""
React to incoming packet
:param packet: Packet to handle
:type packet: T >= paps.si.app.message.APPMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
"""
msg_type = packet.header.message_type
if msg_type == MsgType.JOIN:
self._do_join_packet(packet, ip, port)
elif msg_type == MsgType.UNJOIN:
self._do_unjoin_packet(packet, ip, port)
elif msg_type == MsgType.UPDATE:
self._do_update_packet(packet, ip, port)
|
[
"def",
"_do_packet",
"(",
"self",
",",
"packet",
",",
"ip",
",",
"port",
")",
":",
"msg_type",
"=",
"packet",
".",
"header",
".",
"message_type",
"if",
"msg_type",
"==",
"MsgType",
".",
"JOIN",
":",
"self",
".",
"_do_join_packet",
"(",
"packet",
",",
"ip",
",",
"port",
")",
"elif",
"msg_type",
"==",
"MsgType",
".",
"UNJOIN",
":",
"self",
".",
"_do_unjoin_packet",
"(",
"packet",
",",
"ip",
",",
"port",
")",
"elif",
"msg_type",
"==",
"MsgType",
".",
"UPDATE",
":",
"self",
".",
"_do_update_packet",
"(",
"packet",
",",
"ip",
",",
"port",
")"
] |
React to incoming packet
:param packet: Packet to handle
:type packet: T >= paps.si.app.message.APPMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
|
[
"React",
"to",
"incoming",
"packet"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L75-L94
|
242,476
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._do_join_packet
|
def _do_join_packet(self, packet, ip, port):
"""
React to join packet - add a client to this server
:param packet: Packet from client that wants to join
:type packet: paps.si.app.message.APPJoinMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
"""
self.debug("()")
device_id = packet.header.device_id
key = u"{}:{}".format(ip, port)
if device_id == Id.REQUEST:
device_id = self._new_device_id(key)
client = self._clients.get(device_id, {})
data = {}
if packet.payload:
try:
data = packet.payload
except:
data = {}
client['device_id'] = device_id
client['key'] = key
people = []
try:
for index, person_dict in enumerate(data['people']):
person = Person()
person.from_dict(person_dict)
person.id = u"{}.{}".format(device_id, person.id)
# To get original id -> id.split('.')[0]
people.append(person)
self.changer.on_person_new(people)
except:
self.exception("Failed to update people")
return
# Original ids (without device id)
client['people'] = people
# Add config to client data?
client_dict = dict(client)
del client_dict['people']
self._send_packet(ip, port, APPConfigMessage(payload=client_dict))
self._clients[device_id] = client
self._key2deviceId[key] = device_id
|
python
|
def _do_join_packet(self, packet, ip, port):
"""
React to join packet - add a client to this server
:param packet: Packet from client that wants to join
:type packet: paps.si.app.message.APPJoinMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
"""
self.debug("()")
device_id = packet.header.device_id
key = u"{}:{}".format(ip, port)
if device_id == Id.REQUEST:
device_id = self._new_device_id(key)
client = self._clients.get(device_id, {})
data = {}
if packet.payload:
try:
data = packet.payload
except:
data = {}
client['device_id'] = device_id
client['key'] = key
people = []
try:
for index, person_dict in enumerate(data['people']):
person = Person()
person.from_dict(person_dict)
person.id = u"{}.{}".format(device_id, person.id)
# To get original id -> id.split('.')[0]
people.append(person)
self.changer.on_person_new(people)
except:
self.exception("Failed to update people")
return
# Original ids (without device id)
client['people'] = people
# Add config to client data?
client_dict = dict(client)
del client_dict['people']
self._send_packet(ip, port, APPConfigMessage(payload=client_dict))
self._clients[device_id] = client
self._key2deviceId[key] = device_id
|
[
"def",
"_do_join_packet",
"(",
"self",
",",
"packet",
",",
"ip",
",",
"port",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"device_id",
"=",
"packet",
".",
"header",
".",
"device_id",
"key",
"=",
"u\"{}:{}\"",
".",
"format",
"(",
"ip",
",",
"port",
")",
"if",
"device_id",
"==",
"Id",
".",
"REQUEST",
":",
"device_id",
"=",
"self",
".",
"_new_device_id",
"(",
"key",
")",
"client",
"=",
"self",
".",
"_clients",
".",
"get",
"(",
"device_id",
",",
"{",
"}",
")",
"data",
"=",
"{",
"}",
"if",
"packet",
".",
"payload",
":",
"try",
":",
"data",
"=",
"packet",
".",
"payload",
"except",
":",
"data",
"=",
"{",
"}",
"client",
"[",
"'device_id'",
"]",
"=",
"device_id",
"client",
"[",
"'key'",
"]",
"=",
"key",
"people",
"=",
"[",
"]",
"try",
":",
"for",
"index",
",",
"person_dict",
"in",
"enumerate",
"(",
"data",
"[",
"'people'",
"]",
")",
":",
"person",
"=",
"Person",
"(",
")",
"person",
".",
"from_dict",
"(",
"person_dict",
")",
"person",
".",
"id",
"=",
"u\"{}.{}\"",
".",
"format",
"(",
"device_id",
",",
"person",
".",
"id",
")",
"# To get original id -> id.split('.')[0]",
"people",
".",
"append",
"(",
"person",
")",
"self",
".",
"changer",
".",
"on_person_new",
"(",
"people",
")",
"except",
":",
"self",
".",
"exception",
"(",
"\"Failed to update people\"",
")",
"return",
"# Original ids (without device id)",
"client",
"[",
"'people'",
"]",
"=",
"people",
"# Add config to client data?",
"client_dict",
"=",
"dict",
"(",
"client",
")",
"del",
"client_dict",
"[",
"'people'",
"]",
"self",
".",
"_send_packet",
"(",
"ip",
",",
"port",
",",
"APPConfigMessage",
"(",
"payload",
"=",
"client_dict",
")",
")",
"self",
".",
"_clients",
"[",
"device_id",
"]",
"=",
"client",
"self",
".",
"_key2deviceId",
"[",
"key",
"]",
"=",
"device_id"
] |
React to join packet - add a client to this server
:param packet: Packet from client that wants to join
:type packet: paps.si.app.message.APPJoinMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
|
[
"React",
"to",
"join",
"packet",
"-",
"add",
"a",
"client",
"to",
"this",
"server"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L96-L147
|
242,477
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._do_unjoin_packet
|
def _do_unjoin_packet(self, packet, ip, port):
"""
React to unjoin packet - remove a client from this server
:param packet: Packet from client that wants to join
:type packet: paps.si.app.message.APPJoinMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
"""
self.debug("()")
device_id = packet.header.device_id
if device_id <= Id.SERVER:
self.error("ProtocolViolation: Invalid device id")
return
client = self._clients.get(device_id)
if not client:
self.error("ProtocolViolation: Client is not registered")
return
key = u"{}:{}".format(ip, port)
if client['key'] != key:
self.error(
u"ProtocolViolation: Client key ({}) has changed: {}".format(
client['key'], key
)
)
return
# Packet info seems ok
try:
self.changer.on_person_leave(client['people'])
except:
self.exception("Failed to remove people")
return
# Forget client?
del self._clients[device_id]
del self._key2deviceId[key]
del client
|
python
|
def _do_unjoin_packet(self, packet, ip, port):
"""
React to unjoin packet - remove a client from this server
:param packet: Packet from client that wants to join
:type packet: paps.si.app.message.APPJoinMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
"""
self.debug("()")
device_id = packet.header.device_id
if device_id <= Id.SERVER:
self.error("ProtocolViolation: Invalid device id")
return
client = self._clients.get(device_id)
if not client:
self.error("ProtocolViolation: Client is not registered")
return
key = u"{}:{}".format(ip, port)
if client['key'] != key:
self.error(
u"ProtocolViolation: Client key ({}) has changed: {}".format(
client['key'], key
)
)
return
# Packet info seems ok
try:
self.changer.on_person_leave(client['people'])
except:
self.exception("Failed to remove people")
return
# Forget client?
del self._clients[device_id]
del self._key2deviceId[key]
del client
|
[
"def",
"_do_unjoin_packet",
"(",
"self",
",",
"packet",
",",
"ip",
",",
"port",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"device_id",
"=",
"packet",
".",
"header",
".",
"device_id",
"if",
"device_id",
"<=",
"Id",
".",
"SERVER",
":",
"self",
".",
"error",
"(",
"\"ProtocolViolation: Invalid device id\"",
")",
"return",
"client",
"=",
"self",
".",
"_clients",
".",
"get",
"(",
"device_id",
")",
"if",
"not",
"client",
":",
"self",
".",
"error",
"(",
"\"ProtocolViolation: Client is not registered\"",
")",
"return",
"key",
"=",
"u\"{}:{}\"",
".",
"format",
"(",
"ip",
",",
"port",
")",
"if",
"client",
"[",
"'key'",
"]",
"!=",
"key",
":",
"self",
".",
"error",
"(",
"u\"ProtocolViolation: Client key ({}) has changed: {}\"",
".",
"format",
"(",
"client",
"[",
"'key'",
"]",
",",
"key",
")",
")",
"return",
"# Packet info seems ok",
"try",
":",
"self",
".",
"changer",
".",
"on_person_leave",
"(",
"client",
"[",
"'people'",
"]",
")",
"except",
":",
"self",
".",
"exception",
"(",
"\"Failed to remove people\"",
")",
"return",
"# Forget client?",
"del",
"self",
".",
"_clients",
"[",
"device_id",
"]",
"del",
"self",
".",
"_key2deviceId",
"[",
"key",
"]",
"del",
"client"
] |
React to unjoin packet - remove a client from this server
:param packet: Packet from client that wants to join
:type packet: paps.si.app.message.APPJoinMessage
:param ip: Client ip address
:type ip: unicode
:param port: Client port
:type port: int
:rtype: None
|
[
"React",
"to",
"unjoin",
"packet",
"-",
"remove",
"a",
"client",
"from",
"this",
"server"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L149-L190
|
242,478
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._new_device_id
|
def _new_device_id(self, key):
"""
Generate a new device id or return existing device id for key
:param key: Key for device
:type key: unicode
:return: The device id
:rtype: int
"""
device_id = Id.SERVER + 1
if key in self._key2deviceId:
return self._key2deviceId[key]
while device_id in self._clients:
device_id += 1
return device_id
|
python
|
def _new_device_id(self, key):
"""
Generate a new device id or return existing device id for key
:param key: Key for device
:type key: unicode
:return: The device id
:rtype: int
"""
device_id = Id.SERVER + 1
if key in self._key2deviceId:
return self._key2deviceId[key]
while device_id in self._clients:
device_id += 1
return device_id
|
[
"def",
"_new_device_id",
"(",
"self",
",",
"key",
")",
":",
"device_id",
"=",
"Id",
".",
"SERVER",
"+",
"1",
"if",
"key",
"in",
"self",
".",
"_key2deviceId",
":",
"return",
"self",
".",
"_key2deviceId",
"[",
"key",
"]",
"while",
"device_id",
"in",
"self",
".",
"_clients",
":",
"device_id",
"+=",
"1",
"return",
"device_id"
] |
Generate a new device id or return existing device id for key
:param key: Key for device
:type key: unicode
:return: The device id
:rtype: int
|
[
"Generate",
"a",
"new",
"device",
"id",
"or",
"return",
"existing",
"device",
"id",
"for",
"key"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L253-L267
|
242,479
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._init_multicast_socket
|
def _init_multicast_socket(self):
"""
Init multicast socket
:rtype: None
"""
self.debug("()")
# Create a UDP socket
self._multicast_socket = socket.socket(
socket.AF_INET,
socket.SOCK_DGRAM
)
# Allow reuse of addresses
self._multicast_socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1
)
# Set multicast interface to local_ip
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_MULTICAST_IF,
socket.inet_aton(self._multicast_ip)
)
# Set multicast time-to-live
# Should keep our multicast packets from escaping the local network
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_MULTICAST_TTL,
self._multicast_ttl
)
self._add_membership_multicast_socket()
# Bind socket
if platform.system().lower() == "darwin":
self._multicast_socket.bind(("0.0.0.0", self._multicast_bind_port))
else:
self._multicast_socket.bind(
(self._multicast_ip, self._multicast_bind_port)
)
self._listening.append(self._multicast_socket)
|
python
|
def _init_multicast_socket(self):
"""
Init multicast socket
:rtype: None
"""
self.debug("()")
# Create a UDP socket
self._multicast_socket = socket.socket(
socket.AF_INET,
socket.SOCK_DGRAM
)
# Allow reuse of addresses
self._multicast_socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1
)
# Set multicast interface to local_ip
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_MULTICAST_IF,
socket.inet_aton(self._multicast_ip)
)
# Set multicast time-to-live
# Should keep our multicast packets from escaping the local network
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_MULTICAST_TTL,
self._multicast_ttl
)
self._add_membership_multicast_socket()
# Bind socket
if platform.system().lower() == "darwin":
self._multicast_socket.bind(("0.0.0.0", self._multicast_bind_port))
else:
self._multicast_socket.bind(
(self._multicast_ip, self._multicast_bind_port)
)
self._listening.append(self._multicast_socket)
|
[
"def",
"_init_multicast_socket",
"(",
"self",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"# Create a UDP socket",
"self",
".",
"_multicast_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"# Allow reuse of addresses",
"self",
".",
"_multicast_socket",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"# Set multicast interface to local_ip",
"self",
".",
"_multicast_socket",
".",
"setsockopt",
"(",
"socket",
".",
"IPPROTO_IP",
",",
"socket",
".",
"IP_MULTICAST_IF",
",",
"socket",
".",
"inet_aton",
"(",
"self",
".",
"_multicast_ip",
")",
")",
"# Set multicast time-to-live",
"# Should keep our multicast packets from escaping the local network",
"self",
".",
"_multicast_socket",
".",
"setsockopt",
"(",
"socket",
".",
"IPPROTO_IP",
",",
"socket",
".",
"IP_MULTICAST_TTL",
",",
"self",
".",
"_multicast_ttl",
")",
"self",
".",
"_add_membership_multicast_socket",
"(",
")",
"# Bind socket",
"if",
"platform",
".",
"system",
"(",
")",
".",
"lower",
"(",
")",
"==",
"\"darwin\"",
":",
"self",
".",
"_multicast_socket",
".",
"bind",
"(",
"(",
"\"0.0.0.0\"",
",",
"self",
".",
"_multicast_bind_port",
")",
")",
"else",
":",
"self",
".",
"_multicast_socket",
".",
"bind",
"(",
"(",
"self",
".",
"_multicast_ip",
",",
"self",
".",
"_multicast_bind_port",
")",
")",
"self",
".",
"_listening",
".",
"append",
"(",
"self",
".",
"_multicast_socket",
")"
] |
Init multicast socket
:rtype: None
|
[
"Init",
"multicast",
"socket"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L269-L312
|
242,480
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._shutdown_multicast_socket
|
def _shutdown_multicast_socket(self):
"""
Shutdown multicast socket
:rtype: None
"""
self.debug("()")
self._drop_membership_multicast_socket()
self._listening.remove(self._multicast_socket)
self._multicast_socket.close()
self._multicast_socket = None
|
python
|
def _shutdown_multicast_socket(self):
"""
Shutdown multicast socket
:rtype: None
"""
self.debug("()")
self._drop_membership_multicast_socket()
self._listening.remove(self._multicast_socket)
self._multicast_socket.close()
self._multicast_socket = None
|
[
"def",
"_shutdown_multicast_socket",
"(",
"self",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"self",
".",
"_drop_membership_multicast_socket",
"(",
")",
"self",
".",
"_listening",
".",
"remove",
"(",
"self",
".",
"_multicast_socket",
")",
"self",
".",
"_multicast_socket",
".",
"close",
"(",
")",
"self",
".",
"_multicast_socket",
"=",
"None"
] |
Shutdown multicast socket
:rtype: None
|
[
"Shutdown",
"multicast",
"socket"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L314-L324
|
242,481
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._add_membership_multicast_socket
|
def _add_membership_multicast_socket(self):
"""
Make membership request to multicast
:rtype: None
"""
self._membership_request = socket.inet_aton(self._multicast_group) \
+ socket.inet_aton(self._multicast_ip)
# Send add membership request to socket
# See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html
# for explanation of sockopts
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
self._membership_request
)
|
python
|
def _add_membership_multicast_socket(self):
"""
Make membership request to multicast
:rtype: None
"""
self._membership_request = socket.inet_aton(self._multicast_group) \
+ socket.inet_aton(self._multicast_ip)
# Send add membership request to socket
# See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html
# for explanation of sockopts
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
self._membership_request
)
|
[
"def",
"_add_membership_multicast_socket",
"(",
"self",
")",
":",
"self",
".",
"_membership_request",
"=",
"socket",
".",
"inet_aton",
"(",
"self",
".",
"_multicast_group",
")",
"+",
"socket",
".",
"inet_aton",
"(",
"self",
".",
"_multicast_ip",
")",
"# Send add membership request to socket",
"# See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html",
"# for explanation of sockopts",
"self",
".",
"_multicast_socket",
".",
"setsockopt",
"(",
"socket",
".",
"IPPROTO_IP",
",",
"socket",
".",
"IP_ADD_MEMBERSHIP",
",",
"self",
".",
"_membership_request",
")"
] |
Make membership request to multicast
:rtype: None
|
[
"Make",
"membership",
"request",
"to",
"multicast"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L326-L342
|
242,482
|
the01/python-paps
|
paps/si/app/sensorServer.py
|
SensorServer._drop_membership_multicast_socket
|
def _drop_membership_multicast_socket(self):
"""
Drop membership to multicast
:rtype: None
"""
# Leave group
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_DROP_MEMBERSHIP,
self._membership_request
)
self._membership_request = None
|
python
|
def _drop_membership_multicast_socket(self):
"""
Drop membership to multicast
:rtype: None
"""
# Leave group
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_DROP_MEMBERSHIP,
self._membership_request
)
self._membership_request = None
|
[
"def",
"_drop_membership_multicast_socket",
"(",
"self",
")",
":",
"# Leave group",
"self",
".",
"_multicast_socket",
".",
"setsockopt",
"(",
"socket",
".",
"IPPROTO_IP",
",",
"socket",
".",
"IP_DROP_MEMBERSHIP",
",",
"self",
".",
"_membership_request",
")",
"self",
".",
"_membership_request",
"=",
"None"
] |
Drop membership to multicast
:rtype: None
|
[
"Drop",
"membership",
"to",
"multicast"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L344-L356
|
242,483
|
racker/torment
|
torment/helpers.py
|
evert
|
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
'''Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
'''
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ]
|
python
|
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
'''Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
'''
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ]
|
[
"def",
"evert",
"(",
"iterable",
":",
"Iterable",
"[",
"Dict",
"[",
"str",
",",
"Tuple",
"]",
"]",
")",
"->",
"Iterable",
"[",
"Iterable",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
":",
"keys",
"=",
"list",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"[",
"_",
".",
"keys",
"(",
")",
"for",
"_",
"in",
"iterable",
"]",
")",
")",
"for",
"values",
"in",
"itertools",
".",
"product",
"(",
"*",
"[",
"list",
"(",
"*",
"_",
".",
"values",
"(",
")",
")",
"for",
"_",
"in",
"iterable",
"]",
")",
":",
"yield",
"[",
"dict",
"(",
"(",
"pair",
",",
")",
")",
"for",
"pair",
"in",
"zip",
"(",
"keys",
",",
"values",
")",
"]"
] |
Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
|
[
"Evert",
"dictionaries",
"with",
"tuples",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L32-L63
|
242,484
|
racker/torment
|
torment/helpers.py
|
extend
|
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
'''
_ = copy.deepcopy(base)
_.update(extension)
return _
|
python
|
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
'''
_ = copy.deepcopy(base)
_.update(extension)
return _
|
[
"def",
"extend",
"(",
"base",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
",",
"extension",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"Any",
",",
"Any",
"]",
":",
"_",
"=",
"copy",
".",
"deepcopy",
"(",
"base",
")",
"_",
".",
"update",
"(",
"extension",
")",
"return",
"_"
] |
Extend base by updating with the extension.
**Arguments**
:``base``: dictionary to have keys updated or added
:``extension``: dictionary to update base with
**Return Value(s)**
Resulting dictionary from updating base with extension.
|
[
"Extend",
"base",
"by",
"updating",
"with",
"the",
"extension",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L66-L83
|
242,485
|
racker/torment
|
torment/helpers.py
|
merge
|
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Argumetnts**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
|
python
|
def merge(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
'''Merge extension into base recursively.
**Argumetnts**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
'''
_ = copy.deepcopy(base)
for key, value in extension.items():
if isinstance(value, Dict) and key in _:
_[key] = merge(_[key], value)
else:
_[key] = value
return _
|
[
"def",
"merge",
"(",
"base",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
",",
"extension",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"Any",
",",
"Any",
"]",
":",
"_",
"=",
"copy",
".",
"deepcopy",
"(",
"base",
")",
"for",
"key",
",",
"value",
"in",
"extension",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Dict",
")",
"and",
"key",
"in",
"_",
":",
"_",
"[",
"key",
"]",
"=",
"merge",
"(",
"_",
"[",
"key",
"]",
",",
"value",
")",
"else",
":",
"_",
"[",
"key",
"]",
"=",
"value",
"return",
"_"
] |
Merge extension into base recursively.
**Argumetnts**
:``base``: dictionary to overlay values onto
:``extension``: dictionary to overlay with
**Return Value(s)**
Resulting dictionary from overlaying extension on base.
|
[
"Merge",
"extension",
"into",
"base",
"recursively",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L86-L108
|
242,486
|
racker/torment
|
torment/helpers.py
|
import_directory
|
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
|
python
|
def import_directory(module_basename: str, directory: str, sort_key = None) -> None:
'''Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
'''
logger.info('loading submodules of %s', module_basename)
logger.info('loading modules from %s', directory)
filenames = itertools.chain(*[ [ os.path.join(_[0], filename) for filename in _[2] ] for _ in os.walk(directory) if len(_[2]) ])
modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
for modulename in sorted(modulenames, key = sort_key):
try:
importlib.import_module(modulename)
except ImportError:
logger.warning('failed loading %s', modulename)
logger.exception('module loading failure')
else:
logger.info('successfully loaded %s', modulename)
|
[
"def",
"import_directory",
"(",
"module_basename",
":",
"str",
",",
"directory",
":",
"str",
",",
"sort_key",
"=",
"None",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"'loading submodules of %s'",
",",
"module_basename",
")",
"logger",
".",
"info",
"(",
"'loading modules from %s'",
",",
"directory",
")",
"filenames",
"=",
"itertools",
".",
"chain",
"(",
"*",
"[",
"[",
"os",
".",
"path",
".",
"join",
"(",
"_",
"[",
"0",
"]",
",",
"filename",
")",
"for",
"filename",
"in",
"_",
"[",
"2",
"]",
"]",
"for",
"_",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
"if",
"len",
"(",
"_",
"[",
"2",
"]",
")",
"]",
")",
"modulenames",
"=",
"_filenames_to_modulenames",
"(",
"filenames",
",",
"module_basename",
",",
"directory",
")",
"for",
"modulename",
"in",
"sorted",
"(",
"modulenames",
",",
"key",
"=",
"sort_key",
")",
":",
"try",
":",
"importlib",
".",
"import_module",
"(",
"modulename",
")",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"'failed loading %s'",
",",
"modulename",
")",
"logger",
".",
"exception",
"(",
"'module loading failure'",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'successfully loaded %s'",
",",
"modulename",
")"
] |
Load all python modules in directory and directory's children.
Parameters
----------
:``module_basename``: module name prefix for loaded modules
:``directory``: directory to load python modules from
:``sort_key``: function to sort module names with before loading
|
[
"Load",
"all",
"python",
"modules",
"in",
"directory",
"and",
"directory",
"s",
"children",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L112-L137
|
242,487
|
racker/torment
|
torment/helpers.py
|
_filenames_to_modulenames
|
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
python
|
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]:
'''Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
'''
modulenames = [] # type: Iterable[str]
for filename in filenames:
if not filename.endswith('.py'):
continue
name = filename
name = name.replace(filename_prefix, '')
name = name.replace('__init__.py', '')
name = name.replace('.py', '')
name = name.replace('/', '.')
name = name.strip('.')
if not len(name):
continue
if not modulename_prefix.endswith('.'):
modulename_prefix += '.'
name = modulename_prefix + name
known_symbols = set()
name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ])
if len(name):
modulenames.append(name)
return modulenames
|
[
"def",
"_filenames_to_modulenames",
"(",
"filenames",
":",
"Iterable",
"[",
"str",
"]",
",",
"modulename_prefix",
":",
"str",
",",
"filename_prefix",
":",
"str",
"=",
"''",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"modulenames",
"=",
"[",
"]",
"# type: Iterable[str]",
"for",
"filename",
"in",
"filenames",
":",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.py'",
")",
":",
"continue",
"name",
"=",
"filename",
"name",
"=",
"name",
".",
"replace",
"(",
"filename_prefix",
",",
"''",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"'__init__.py'",
",",
"''",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"'.py'",
",",
"''",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"'/'",
",",
"'.'",
")",
"name",
"=",
"name",
".",
"strip",
"(",
"'.'",
")",
"if",
"not",
"len",
"(",
"name",
")",
":",
"continue",
"if",
"not",
"modulename_prefix",
".",
"endswith",
"(",
"'.'",
")",
":",
"modulename_prefix",
"+=",
"'.'",
"name",
"=",
"modulename_prefix",
"+",
"name",
"known_symbols",
"=",
"set",
"(",
")",
"name",
"=",
"'.'",
".",
"join",
"(",
"[",
"_",
"for",
"_",
"in",
"name",
".",
"split",
"(",
"'.'",
")",
"if",
"_",
"not",
"in",
"known_symbols",
"and",
"not",
"known_symbols",
".",
"add",
"(",
"_",
")",
"]",
")",
"if",
"len",
"(",
"name",
")",
":",
"modulenames",
".",
"append",
"(",
"name",
")",
"return",
"modulenames"
] |
Convert given filenames to module names.
Any filename that does not have a corresponding module name will be dropped
from the result (i.e. __init__.py).
Parameters
----------
:``filename_prefix``: a prefix to drop from all filenames (typically a
common directory); defaults to ''
:``filenames``: the filenames to transform into module names
:``modulename_prefix``: a prefix to add to all module names
Return Value(s)
---------------
A list of modulenames corresponding to all filenames (for legal module names).
|
[
"Convert",
"given",
"filenames",
"to",
"module",
"names",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/helpers.py#L158-L208
|
242,488
|
za-creature/gulpless
|
gulpless/proxy.py
|
Proxy.on_any_event
|
def on_any_event(self, event):
"""Called whenever a FS event occurs."""
self.updated = True
if self._changed:
self._changed()
|
python
|
def on_any_event(self, event):
"""Called whenever a FS event occurs."""
self.updated = True
if self._changed:
self._changed()
|
[
"def",
"on_any_event",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"updated",
"=",
"True",
"if",
"self",
".",
"_changed",
":",
"self",
".",
"_changed",
"(",
")"
] |
Called whenever a FS event occurs.
|
[
"Called",
"whenever",
"a",
"FS",
"event",
"occurs",
"."
] |
fd73907dbe86880086719816bb042233f85121f6
|
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/proxy.py#L19-L23
|
242,489
|
racker/torment
|
torment/decorators.py
|
log
|
def log(prefix = ''):
'''Add start and stop logging messages to the function.
Parameters
----------
:``prefix``: a prefix for the function name (optional)
'''
function = None
if inspect.isfunction(prefix):
prefix, function = '', prefix
def _(function):
@functools.wraps(function, assigned = functools.WRAPPER_ASSIGNMENTS + ( '__file__', ))
def wrapper(*args, **kwargs):
name, my_args = function.__name__, args
if inspect.ismethod(function):
name = function.__self__.__class__.__name__ + '.' + function.__name__
elif len(args):
members = dict(inspect.getmembers(args[0], predicate = lambda _: inspect.ismethod(_) and _.__name__ == function.__name__))
logger.debug('members.keys(): %s', members.keys())
if len(members):
name, my_args = args[0].__class__.__name__ + '.' + function.__name__, args[1:]
format_args = (
prefix + name,
', '.join(list(map(str, my_args)) + [ ' = '.join(map(str, item)) for item in kwargs.items() ]),
)
logger.info('STARTING: %s(%s)', *format_args)
try:
return function(*args, **kwargs)
except:
logger.exception('EXCEPTION: %s(%s)', *format_args)
raise
finally:
logger.info('STOPPING: %s(%s)', *format_args)
return wrapper
if function is not None:
_ = _(function)
return _
|
python
|
def log(prefix = ''):
'''Add start and stop logging messages to the function.
Parameters
----------
:``prefix``: a prefix for the function name (optional)
'''
function = None
if inspect.isfunction(prefix):
prefix, function = '', prefix
def _(function):
@functools.wraps(function, assigned = functools.WRAPPER_ASSIGNMENTS + ( '__file__', ))
def wrapper(*args, **kwargs):
name, my_args = function.__name__, args
if inspect.ismethod(function):
name = function.__self__.__class__.__name__ + '.' + function.__name__
elif len(args):
members = dict(inspect.getmembers(args[0], predicate = lambda _: inspect.ismethod(_) and _.__name__ == function.__name__))
logger.debug('members.keys(): %s', members.keys())
if len(members):
name, my_args = args[0].__class__.__name__ + '.' + function.__name__, args[1:]
format_args = (
prefix + name,
', '.join(list(map(str, my_args)) + [ ' = '.join(map(str, item)) for item in kwargs.items() ]),
)
logger.info('STARTING: %s(%s)', *format_args)
try:
return function(*args, **kwargs)
except:
logger.exception('EXCEPTION: %s(%s)', *format_args)
raise
finally:
logger.info('STOPPING: %s(%s)', *format_args)
return wrapper
if function is not None:
_ = _(function)
return _
|
[
"def",
"log",
"(",
"prefix",
"=",
"''",
")",
":",
"function",
"=",
"None",
"if",
"inspect",
".",
"isfunction",
"(",
"prefix",
")",
":",
"prefix",
",",
"function",
"=",
"''",
",",
"prefix",
"def",
"_",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
",",
"assigned",
"=",
"functools",
".",
"WRAPPER_ASSIGNMENTS",
"+",
"(",
"'__file__'",
",",
")",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
",",
"my_args",
"=",
"function",
".",
"__name__",
",",
"args",
"if",
"inspect",
".",
"ismethod",
"(",
"function",
")",
":",
"name",
"=",
"function",
".",
"__self__",
".",
"__class__",
".",
"__name__",
"+",
"'.'",
"+",
"function",
".",
"__name__",
"elif",
"len",
"(",
"args",
")",
":",
"members",
"=",
"dict",
"(",
"inspect",
".",
"getmembers",
"(",
"args",
"[",
"0",
"]",
",",
"predicate",
"=",
"lambda",
"_",
":",
"inspect",
".",
"ismethod",
"(",
"_",
")",
"and",
"_",
".",
"__name__",
"==",
"function",
".",
"__name__",
")",
")",
"logger",
".",
"debug",
"(",
"'members.keys(): %s'",
",",
"members",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"members",
")",
":",
"name",
",",
"my_args",
"=",
"args",
"[",
"0",
"]",
".",
"__class__",
".",
"__name__",
"+",
"'.'",
"+",
"function",
".",
"__name__",
",",
"args",
"[",
"1",
":",
"]",
"format_args",
"=",
"(",
"prefix",
"+",
"name",
",",
"', '",
".",
"join",
"(",
"list",
"(",
"map",
"(",
"str",
",",
"my_args",
")",
")",
"+",
"[",
"' = '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"item",
")",
")",
"for",
"item",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
")",
",",
")",
"logger",
".",
"info",
"(",
"'STARTING: %s(%s)'",
",",
"*",
"format_args",
")",
"try",
":",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"logger",
".",
"exception",
"(",
"'EXCEPTION: %s(%s)'",
",",
"*",
"format_args",
")",
"raise",
"finally",
":",
"logger",
".",
"info",
"(",
"'STOPPING: %s(%s)'",
",",
"*",
"format_args",
")",
"return",
"wrapper",
"if",
"function",
"is",
"not",
"None",
":",
"_",
"=",
"_",
"(",
"function",
")",
"return",
"_"
] |
Add start and stop logging messages to the function.
Parameters
----------
:``prefix``: a prefix for the function name (optional)
|
[
"Add",
"start",
"and",
"stop",
"logging",
"messages",
"to",
"the",
"function",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/decorators.py#L28-L77
|
242,490
|
Metatab/tableintuit
|
tableintuit/rows.py
|
RowIntuiter.spec
|
def spec(self):
"""Return a dict with values that can be fed directly into SelectiveRowGenerator"""
return dict(
headers=self.header_lines,
start=self.start_line,
comments=self.comment_lines,
end=self.end_line
)
|
python
|
def spec(self):
"""Return a dict with values that can be fed directly into SelectiveRowGenerator"""
return dict(
headers=self.header_lines,
start=self.start_line,
comments=self.comment_lines,
end=self.end_line
)
|
[
"def",
"spec",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"headers",
"=",
"self",
".",
"header_lines",
",",
"start",
"=",
"self",
".",
"start_line",
",",
"comments",
"=",
"self",
".",
"comment_lines",
",",
"end",
"=",
"self",
".",
"end_line",
")"
] |
Return a dict with values that can be fed directly into SelectiveRowGenerator
|
[
"Return",
"a",
"dict",
"with",
"values",
"that",
"can",
"be",
"fed",
"directly",
"into",
"SelectiveRowGenerator"
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/rows.py#L70-L77
|
242,491
|
Metatab/tableintuit
|
tableintuit/rows.py
|
RowIntuiter.picture
|
def picture(self, row):
"""Create a simplified character representation of the data row, which can be pattern matched
with a regex """
template = '_Xn'
types = (type(None), binary_type, int)
def guess_type(v):
try:
v = text_type(v).strip()
except ValueError:
v = binary_type(v).strip()
#v = v.decode('ascii', 'replace').strip()
if not bool(v):
return type(None)
for t in (float, int, binary_type, text_type):
try:
return type(t(v))
except:
pass
def p(e):
tm = t = None
try:
t = guess_type(e)
tm = self.type_map.get(t, t)
return template[types.index(tm)]
except ValueError as e:
raise ValueError("Type '{}'/'{}' not in the types list: {} ({})".format(t, tm, types, e))
return ''.join(p(e) for e in row)
|
python
|
def picture(self, row):
"""Create a simplified character representation of the data row, which can be pattern matched
with a regex """
template = '_Xn'
types = (type(None), binary_type, int)
def guess_type(v):
try:
v = text_type(v).strip()
except ValueError:
v = binary_type(v).strip()
#v = v.decode('ascii', 'replace').strip()
if not bool(v):
return type(None)
for t in (float, int, binary_type, text_type):
try:
return type(t(v))
except:
pass
def p(e):
tm = t = None
try:
t = guess_type(e)
tm = self.type_map.get(t, t)
return template[types.index(tm)]
except ValueError as e:
raise ValueError("Type '{}'/'{}' not in the types list: {} ({})".format(t, tm, types, e))
return ''.join(p(e) for e in row)
|
[
"def",
"picture",
"(",
"self",
",",
"row",
")",
":",
"template",
"=",
"'_Xn'",
"types",
"=",
"(",
"type",
"(",
"None",
")",
",",
"binary_type",
",",
"int",
")",
"def",
"guess_type",
"(",
"v",
")",
":",
"try",
":",
"v",
"=",
"text_type",
"(",
"v",
")",
".",
"strip",
"(",
")",
"except",
"ValueError",
":",
"v",
"=",
"binary_type",
"(",
"v",
")",
".",
"strip",
"(",
")",
"#v = v.decode('ascii', 'replace').strip()",
"if",
"not",
"bool",
"(",
"v",
")",
":",
"return",
"type",
"(",
"None",
")",
"for",
"t",
"in",
"(",
"float",
",",
"int",
",",
"binary_type",
",",
"text_type",
")",
":",
"try",
":",
"return",
"type",
"(",
"t",
"(",
"v",
")",
")",
"except",
":",
"pass",
"def",
"p",
"(",
"e",
")",
":",
"tm",
"=",
"t",
"=",
"None",
"try",
":",
"t",
"=",
"guess_type",
"(",
"e",
")",
"tm",
"=",
"self",
".",
"type_map",
".",
"get",
"(",
"t",
",",
"t",
")",
"return",
"template",
"[",
"types",
".",
"index",
"(",
"tm",
")",
"]",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"Type '{}'/'{}' not in the types list: {} ({})\"",
".",
"format",
"(",
"t",
",",
"tm",
",",
"types",
",",
"e",
")",
")",
"return",
"''",
".",
"join",
"(",
"p",
"(",
"e",
")",
"for",
"e",
"in",
"row",
")"
] |
Create a simplified character representation of the data row, which can be pattern matched
with a regex
|
[
"Create",
"a",
"simplified",
"character",
"representation",
"of",
"the",
"data",
"row",
"which",
"can",
"be",
"pattern",
"matched",
"with",
"a",
"regex"
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/rows.py#L79-L113
|
242,492
|
Metatab/tableintuit
|
tableintuit/rows.py
|
RowIntuiter.coalesce_headers
|
def coalesce_headers(cls, header_lines):
"""Collects headers that are spread across multiple lines into a single row"""
header_lines = [list(hl) for hl in header_lines if bool(hl)]
if len(header_lines) == 0:
return []
if len(header_lines) == 1:
return header_lines[0]
# If there are gaps in the values of a line, copy them forward, so there
# is some value in every position
for hl in header_lines:
last = None
for i in range(len(hl)):
hli = text_type(hl[i])
if not hli.strip():
hl[i] = last
else:
last = hli
headers = [' '.join(text_type(col_val).strip() if col_val else '' for col_val in col_set)
for col_set in zip(*header_lines)]
headers = [slugify(h.strip()) for h in headers]
return headers
|
python
|
def coalesce_headers(cls, header_lines):
"""Collects headers that are spread across multiple lines into a single row"""
header_lines = [list(hl) for hl in header_lines if bool(hl)]
if len(header_lines) == 0:
return []
if len(header_lines) == 1:
return header_lines[0]
# If there are gaps in the values of a line, copy them forward, so there
# is some value in every position
for hl in header_lines:
last = None
for i in range(len(hl)):
hli = text_type(hl[i])
if not hli.strip():
hl[i] = last
else:
last = hli
headers = [' '.join(text_type(col_val).strip() if col_val else '' for col_val in col_set)
for col_set in zip(*header_lines)]
headers = [slugify(h.strip()) for h in headers]
return headers
|
[
"def",
"coalesce_headers",
"(",
"cls",
",",
"header_lines",
")",
":",
"header_lines",
"=",
"[",
"list",
"(",
"hl",
")",
"for",
"hl",
"in",
"header_lines",
"if",
"bool",
"(",
"hl",
")",
"]",
"if",
"len",
"(",
"header_lines",
")",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"len",
"(",
"header_lines",
")",
"==",
"1",
":",
"return",
"header_lines",
"[",
"0",
"]",
"# If there are gaps in the values of a line, copy them forward, so there",
"# is some value in every position",
"for",
"hl",
"in",
"header_lines",
":",
"last",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"hl",
")",
")",
":",
"hli",
"=",
"text_type",
"(",
"hl",
"[",
"i",
"]",
")",
"if",
"not",
"hli",
".",
"strip",
"(",
")",
":",
"hl",
"[",
"i",
"]",
"=",
"last",
"else",
":",
"last",
"=",
"hli",
"headers",
"=",
"[",
"' '",
".",
"join",
"(",
"text_type",
"(",
"col_val",
")",
".",
"strip",
"(",
")",
"if",
"col_val",
"else",
"''",
"for",
"col_val",
"in",
"col_set",
")",
"for",
"col_set",
"in",
"zip",
"(",
"*",
"header_lines",
")",
"]",
"headers",
"=",
"[",
"slugify",
"(",
"h",
".",
"strip",
"(",
")",
")",
"for",
"h",
"in",
"headers",
"]",
"return",
"headers"
] |
Collects headers that are spread across multiple lines into a single row
|
[
"Collects",
"headers",
"that",
"are",
"spread",
"across",
"multiple",
"lines",
"into",
"a",
"single",
"row"
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/rows.py#L291-L318
|
242,493
|
csaez/wishlib
|
wishlib/utils.py
|
map_recursive
|
def map_recursive(function, iterable):
"""
Apply function recursively to every item or value of iterable and returns a
new iterable object with the results.
"""
if isiterable(iterable):
dataOut = iterable.__class__()
for i in iterable:
if isinstance(dataOut, dict):
dataOut[i] = map_recursive(function, iterable[i])
else:
# convert to list and append
if not isinstance(dataOut, list):
dataOut = list(dataOut)
dataOut.append(map_recursive(function, i))
return dataOut
return function(iterable)
|
python
|
def map_recursive(function, iterable):
"""
Apply function recursively to every item or value of iterable and returns a
new iterable object with the results.
"""
if isiterable(iterable):
dataOut = iterable.__class__()
for i in iterable:
if isinstance(dataOut, dict):
dataOut[i] = map_recursive(function, iterable[i])
else:
# convert to list and append
if not isinstance(dataOut, list):
dataOut = list(dataOut)
dataOut.append(map_recursive(function, i))
return dataOut
return function(iterable)
|
[
"def",
"map_recursive",
"(",
"function",
",",
"iterable",
")",
":",
"if",
"isiterable",
"(",
"iterable",
")",
":",
"dataOut",
"=",
"iterable",
".",
"__class__",
"(",
")",
"for",
"i",
"in",
"iterable",
":",
"if",
"isinstance",
"(",
"dataOut",
",",
"dict",
")",
":",
"dataOut",
"[",
"i",
"]",
"=",
"map_recursive",
"(",
"function",
",",
"iterable",
"[",
"i",
"]",
")",
"else",
":",
"# convert to list and append",
"if",
"not",
"isinstance",
"(",
"dataOut",
",",
"list",
")",
":",
"dataOut",
"=",
"list",
"(",
"dataOut",
")",
"dataOut",
".",
"append",
"(",
"map_recursive",
"(",
"function",
",",
"i",
")",
")",
"return",
"dataOut",
"return",
"function",
"(",
"iterable",
")"
] |
Apply function recursively to every item or value of iterable and returns a
new iterable object with the results.
|
[
"Apply",
"function",
"recursively",
"to",
"every",
"item",
"or",
"value",
"of",
"iterable",
"and",
"returns",
"a",
"new",
"iterable",
"object",
"with",
"the",
"results",
"."
] |
c212fa7875006a332a4cefbf69885ced9647bc2f
|
https://github.com/csaez/wishlib/blob/c212fa7875006a332a4cefbf69885ced9647bc2f/wishlib/utils.py#L58-L74
|
242,494
|
Nekroze/librarian
|
librarian/deck.py
|
Deck.get_card
|
def get_card(self, index=-1, cache=True, remove=True):
"""
Retrieve a card any number of cards from the top. Returns a
``Card`` object loaded from a library if one is specified otherwise
just it will simply return its code.
If `index` is not set then the top card will be retrieved.
If cache is set to True (the default) it will tell the library to cache
the returned card for faster look-ups in the future.
If remove is true then the card will be removed from the deck before
returning it.
"""
if len(self.cards) < index:
return None
retriever = self.cards.pop if remove else self.cards.__getitem__
code = retriever(index)
if self.library:
return self.library.load_card(code, cache)
else:
return code
|
python
|
def get_card(self, index=-1, cache=True, remove=True):
"""
Retrieve a card any number of cards from the top. Returns a
``Card`` object loaded from a library if one is specified otherwise
just it will simply return its code.
If `index` is not set then the top card will be retrieved.
If cache is set to True (the default) it will tell the library to cache
the returned card for faster look-ups in the future.
If remove is true then the card will be removed from the deck before
returning it.
"""
if len(self.cards) < index:
return None
retriever = self.cards.pop if remove else self.cards.__getitem__
code = retriever(index)
if self.library:
return self.library.load_card(code, cache)
else:
return code
|
[
"def",
"get_card",
"(",
"self",
",",
"index",
"=",
"-",
"1",
",",
"cache",
"=",
"True",
",",
"remove",
"=",
"True",
")",
":",
"if",
"len",
"(",
"self",
".",
"cards",
")",
"<",
"index",
":",
"return",
"None",
"retriever",
"=",
"self",
".",
"cards",
".",
"pop",
"if",
"remove",
"else",
"self",
".",
"cards",
".",
"__getitem__",
"code",
"=",
"retriever",
"(",
"index",
")",
"if",
"self",
".",
"library",
":",
"return",
"self",
".",
"library",
".",
"load_card",
"(",
"code",
",",
"cache",
")",
"else",
":",
"return",
"code"
] |
Retrieve a card any number of cards from the top. Returns a
``Card`` object loaded from a library if one is specified otherwise
just it will simply return its code.
If `index` is not set then the top card will be retrieved.
If cache is set to True (the default) it will tell the library to cache
the returned card for faster look-ups in the future.
If remove is true then the card will be removed from the deck before
returning it.
|
[
"Retrieve",
"a",
"card",
"any",
"number",
"of",
"cards",
"from",
"the",
"top",
".",
"Returns",
"a",
"Card",
"object",
"loaded",
"from",
"a",
"library",
"if",
"one",
"is",
"specified",
"otherwise",
"just",
"it",
"will",
"simply",
"return",
"its",
"code",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L22-L45
|
242,495
|
Nekroze/librarian
|
librarian/deck.py
|
Deck.top_cards
|
def top_cards(self, number=1, cache=True, remove=True):
"""
Retrieve the top number of cards as ``Librarian.Card`` objects in a
list in order of top to bottom most card. Uses the decks
``.get_card`` and passes along the cache and remove arguments.
"""
getter = partial(self.get_card(cache=cache, remove=remove))
return [getter(index=i) for i in range(number)]
|
python
|
def top_cards(self, number=1, cache=True, remove=True):
"""
Retrieve the top number of cards as ``Librarian.Card`` objects in a
list in order of top to bottom most card. Uses the decks
``.get_card`` and passes along the cache and remove arguments.
"""
getter = partial(self.get_card(cache=cache, remove=remove))
return [getter(index=i) for i in range(number)]
|
[
"def",
"top_cards",
"(",
"self",
",",
"number",
"=",
"1",
",",
"cache",
"=",
"True",
",",
"remove",
"=",
"True",
")",
":",
"getter",
"=",
"partial",
"(",
"self",
".",
"get_card",
"(",
"cache",
"=",
"cache",
",",
"remove",
"=",
"remove",
")",
")",
"return",
"[",
"getter",
"(",
"index",
"=",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"number",
")",
"]"
] |
Retrieve the top number of cards as ``Librarian.Card`` objects in a
list in order of top to bottom most card. Uses the decks
``.get_card`` and passes along the cache and remove arguments.
|
[
"Retrieve",
"the",
"top",
"number",
"of",
"cards",
"as",
"Librarian",
".",
"Card",
"objects",
"in",
"a",
"list",
"in",
"order",
"of",
"top",
"to",
"bottom",
"most",
"card",
".",
"Uses",
"the",
"decks",
".",
"get_card",
"and",
"passes",
"along",
"the",
"cache",
"and",
"remove",
"arguments",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L47-L54
|
242,496
|
Nekroze/librarian
|
librarian/deck.py
|
Deck.move_top_cards
|
def move_top_cards(self, other, number=1):
"""
Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified.
"""
other.cards.append(reversed(self.cards[-number:]))
|
python
|
def move_top_cards(self, other, number=1):
"""
Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified.
"""
other.cards.append(reversed(self.cards[-number:]))
|
[
"def",
"move_top_cards",
"(",
"self",
",",
"other",
",",
"number",
"=",
"1",
")",
":",
"other",
".",
"cards",
".",
"append",
"(",
"reversed",
"(",
"self",
".",
"cards",
"[",
"-",
"number",
":",
"]",
")",
")"
] |
Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified.
|
[
"Move",
"the",
"top",
"number",
"of",
"cards",
"to",
"the",
"top",
"of",
"some",
"other",
"deck",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L56-L62
|
242,497
|
Nekroze/librarian
|
librarian/deck.py
|
Deck.contians_attribute
|
def contians_attribute(self, attribute):
"""
Returns how many cards in the deck have the specified attribute.
This method requires a library to be stored in the deck instance and
will return `None` if there is no library.
"""
if self.library is None:
return 0
load = self.library.load_card
matches = 0
for code in self.cards:
card = load(code)
if card.has_attribute(attribute):
matches += 1
return matches
|
python
|
def contians_attribute(self, attribute):
"""
Returns how many cards in the deck have the specified attribute.
This method requires a library to be stored in the deck instance and
will return `None` if there is no library.
"""
if self.library is None:
return 0
load = self.library.load_card
matches = 0
for code in self.cards:
card = load(code)
if card.has_attribute(attribute):
matches += 1
return matches
|
[
"def",
"contians_attribute",
"(",
"self",
",",
"attribute",
")",
":",
"if",
"self",
".",
"library",
"is",
"None",
":",
"return",
"0",
"load",
"=",
"self",
".",
"library",
".",
"load_card",
"matches",
"=",
"0",
"for",
"code",
"in",
"self",
".",
"cards",
":",
"card",
"=",
"load",
"(",
"code",
")",
"if",
"card",
".",
"has_attribute",
"(",
"attribute",
")",
":",
"matches",
"+=",
"1",
"return",
"matches"
] |
Returns how many cards in the deck have the specified attribute.
This method requires a library to be stored in the deck instance and
will return `None` if there is no library.
|
[
"Returns",
"how",
"many",
"cards",
"in",
"the",
"deck",
"have",
"the",
"specified",
"attribute",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L68-L84
|
242,498
|
Nekroze/librarian
|
librarian/deck.py
|
Deck.contains_info
|
def contains_info(self, key, value):
"""
Returns how many cards in the deck have the specified value under the
specified key in their info data.
This method requires a library to be stored in the deck instance and
will return `None` if there is no library.
"""
if self.library is None:
return 0
load = self.library.load_card
matches = 0
for code in self.cards:
card = load(code)
if card.get_info(key) == value:
matches += 1
return matches
|
python
|
def contains_info(self, key, value):
"""
Returns how many cards in the deck have the specified value under the
specified key in their info data.
This method requires a library to be stored in the deck instance and
will return `None` if there is no library.
"""
if self.library is None:
return 0
load = self.library.load_card
matches = 0
for code in self.cards:
card = load(code)
if card.get_info(key) == value:
matches += 1
return matches
|
[
"def",
"contains_info",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"self",
".",
"library",
"is",
"None",
":",
"return",
"0",
"load",
"=",
"self",
".",
"library",
".",
"load_card",
"matches",
"=",
"0",
"for",
"code",
"in",
"self",
".",
"cards",
":",
"card",
"=",
"load",
"(",
"code",
")",
"if",
"card",
".",
"get_info",
"(",
"key",
")",
"==",
"value",
":",
"matches",
"+=",
"1",
"return",
"matches"
] |
Returns how many cards in the deck have the specified value under the
specified key in their info data.
This method requires a library to be stored in the deck instance and
will return `None` if there is no library.
|
[
"Returns",
"how",
"many",
"cards",
"in",
"the",
"deck",
"have",
"the",
"specified",
"value",
"under",
"the",
"specified",
"key",
"in",
"their",
"info",
"data",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L86-L103
|
242,499
|
mgk/thingamon
|
thingamon/client.py
|
Client.connect
|
def connect(self):
"""Connect to MQTT server and wait for server to acknowledge"""
if not self.connect_attempted:
self.connect_attempted = True
self.client.connect(self.host, port=self.port)
self.client.loop_start()
while not self.connected:
log.info('waiting for MQTT connection...')
time.sleep(1)
|
python
|
def connect(self):
"""Connect to MQTT server and wait for server to acknowledge"""
if not self.connect_attempted:
self.connect_attempted = True
self.client.connect(self.host, port=self.port)
self.client.loop_start()
while not self.connected:
log.info('waiting for MQTT connection...')
time.sleep(1)
|
[
"def",
"connect",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"connect_attempted",
":",
"self",
".",
"connect_attempted",
"=",
"True",
"self",
".",
"client",
".",
"connect",
"(",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
")",
"self",
".",
"client",
".",
"loop_start",
"(",
")",
"while",
"not",
"self",
".",
"connected",
":",
"log",
".",
"info",
"(",
"'waiting for MQTT connection...'",
")",
"time",
".",
"sleep",
"(",
"1",
")"
] |
Connect to MQTT server and wait for server to acknowledge
|
[
"Connect",
"to",
"MQTT",
"server",
"and",
"wait",
"for",
"server",
"to",
"acknowledge"
] |
3f7d68dc2131c347473af15cd5f7d4b669407c6b
|
https://github.com/mgk/thingamon/blob/3f7d68dc2131c347473af15cd5f7d4b669407c6b/thingamon/client.py#L81-L90
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.