| id | repo | path | func_name | original_string | language | sha | url |
|---|---|---|---|---|---|---|---|
243,000
|
honzamach/pynspect
|
pynspect/compilers.py
|
IDEAFilterCompiler.register_variable_compilation
|
def register_variable_compilation(self, path, compilation_cbk, listclass):
"""
Register given compilation method for variable on given path.
:param str path: JPath for given variable.
:param callable compilation_cbk: Compilation callback to be called.
:param class listclass: List class to use for lists.
"""
self.compilations_variable[path] = {
'callback': compilation_cbk,
'listclass': listclass
}
|
python
|
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/compilers.py#L248-L259
|
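A minimal usage sketch for the row above; the import locations, path, and callback are illustrative assumptions, not part of the dataset record:

from pynspect.compilers import IDEAFilterCompiler  # assumed import location
from pynspect.rules import ConstantRule, ListRule  # assumed import location

def compile_lowercase(rule):
    # hypothetical callback: normalize the constant opposite the variable
    return ConstantRule(str(rule.value).lower())

compiler = IDEAFilterCompiler()
compiler.register_variable_compilation(
    'Category',         # JPath of the variable (illustrative)
    compile_lowercase,  # called on the constant, or on each list item
    ListRule,           # wraps the per-item results when the value is a list
)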
243,001
|
honzamach/pynspect
|
pynspect/compilers.py
|
IDEAFilterCompiler.register_function_compilation
|
def register_function_compilation(self, func, compilation_cbk, listclass):
"""
Register given compilation method for given function.
    :param str func: Function name.
:param callable compilation_cbk: Compilation callback to be called.
:param class listclass: List class to use for lists.
"""
self.compilations_function[func] = {
'callback': compilation_cbk,
'listclass': listclass
}
|
python
|
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/compilers.py#L261-L272
|
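The function variant is keyed by function name instead of JPath; a sketch reusing the hypothetical callback from the previous example ('utcnow' is illustrative):

compiler.register_function_compilation(
    'utcnow',           # name of the filter function (illustrative)
    compile_lowercase,  # hypothetical callback from the sketch above
    ListRule,
)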
243,002
|
honzamach/pynspect
|
pynspect/compilers.py
|
IDEAFilterCompiler._cor_compile
|
def _cor_compile(rule, var, val, result_class, key, compilation_list):
"""
Actual compilation worker method.
"""
compilation = compilation_list.get(key, None)
if compilation:
if isinstance(val, ListRule):
result = []
for itemv in val.value:
result.append(compilation['callback'](itemv))
val = compilation['listclass'](result)
else:
val = compilation['callback'](val)
return result_class(rule.operation, var, val)
|
python
|
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/compilers.py#L277-L291
|
243,003
|
honzamach/pynspect
|
pynspect/compilers.py
|
IDEAFilterCompiler._compile_operation_rule
|
def _compile_operation_rule(self, rule, left, right, result_class):
"""
    Compile given operation rule, when possible for given combination of
operation operands.
"""
# Make sure variables always have constant with correct datatype on the
# opposite side of operation.
if isinstance(left, VariableRule) and isinstance(right, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
left,
right,
result_class,
clean_variable(left.value),
self.compilations_variable
)
if isinstance(right, VariableRule) and isinstance(left, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
right,
left,
result_class,
clean_variable(right.value),
self.compilations_variable
)
# Make sure functions always have constant with correct datatype on the
# opposite side of operation.
if isinstance(left, FunctionRule) and isinstance(right, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
left,
right,
result_class,
left.function,
self.compilations_function
)
if isinstance(right, FunctionRule) and isinstance(left, (ConstantRule, ListRule)):
return self._cor_compile(
rule,
right,
left,
result_class,
right.function,
self.compilations_function
)
# In all other cases just keep things the way they are.
return result_class(rule.operation, left, right)
|
python
|
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/compilers.py#L293-L342
|
243,004
|
honzamach/pynspect
|
pynspect/compilers.py
|
IDEAFilterCompiler._calculate_operation_math
|
def _calculate_operation_math(self, rule, left, right):
"""
Perform compilation of given math operation by actually calculating given
math expression.
"""
# Attempt to keep integer data type for the result, when possible.
if isinstance(left, IntegerRule) and isinstance(right, IntegerRule):
result = self.evaluate_binop_math(rule.operation, left.value, right.value)
if isinstance(result, list):
return ListRule([IntegerRule(r) for r in result])
return IntegerRule(result)
# Otherwise the result is float.
if isinstance(left, NumberRule) and isinstance(right, NumberRule):
result = self.evaluate_binop_math(rule.operation, left.value, right.value)
if isinstance(result, list):
return ListRule([FloatRule(r) for r in result])
return FloatRule(result)
# This point should never be reached.
raise Exception()
|
python
|
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/compilers.py#L344-L365
|
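A self-contained analogue of the integer-preserving constant folding above, with plain ints and floats standing in for IntegerRule and FloatRule:

import operator

def fold(op, left, right):
    # Mirror the dispatch above: keep the integer type when both operands
    # are integers, otherwise promote the result to float.
    result = op(left, right)
    if isinstance(left, int) and isinstance(right, int):
        return int(result)
    return float(result)

assert fold(operator.add, 2, 3) == 5      # stays integral
assert fold(operator.add, 2, 3.0) == 5.0  # promoted to float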
243,005
|
pyvec/pyvodb
|
pyvodb/load.py
|
get_db
|
def get_db(directory, engine=None):
"""Get a database
:param directory: The root data directory
:param engine: a pre-created SQLAlchemy engine (default: in-memory SQLite)
"""
if engine is None:
engine = create_engine('sqlite://')
tables.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
db = Session()
if directory is not None:
load_from_directory(db, directory)
return db
|
python
|
07183333df26eb12c5c2b98802cde3fb3a6c1339
|
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/load.py#L22-L35
|
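Usage sketch; the directory and file names are illustrative:

db = get_db(None)      # empty in-memory SQLite database, nothing loaded
db = get_db('data/')   # in-memory database populated from ./data

from sqlalchemy import create_engine
db = get_db('data/', engine=create_engine('sqlite:///meetups.db'))  # file-backed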
243,006
|
legnaleurc/wcpan.listen
|
wcpan/listen/helper.py
|
bind_unix_socket
|
def bind_unix_socket(file_, mode=0o600, backlog=_DEFAULT_BACKLOG):
"""Creates a listening unix socket.
If a socket with the given name already exists, it will be deleted.
If any other file with that name exists, an exception will be
raised.
Returns a socket object (not a list of socket objects like
`bind_sockets`)
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file_)
except OSError as err:
if err.errno != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(file_)
else:
            raise ValueError('File %s exists and is not a socket' % file_)
sock.bind(file_)
os.chmod(file_, mode)
sock.listen(backlog)
return sock
|
python
|
9c11e92ec5db588fc181543ad723b6eef0741db1
|
https://github.com/legnaleurc/wcpan.listen/blob/9c11e92ec5db588fc181543ad723b6eef0741db1/wcpan/listen/helper.py#L188-L214
|
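Usage sketch; the socket path is illustrative:

sock = bind_unix_socket('/tmp/app.sock', mode=0o600)
# The socket is non-blocking, so accept() raises BlockingIOError while no
# connection is pending; it is normally driven by an event loop.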
243,007
|
noobermin/lspreader
|
lspreader/pmovie.py
|
firsthash
|
def firsthash(frame, removedupes=False):
'''
    Hashes the first time step. Will only work as long as
    the hash can fit in a uint64.
    Parameters:
    -----------
    frame : first frame.
    Keywords:
    ---------
    removedupes : mark duplicate hashes in the given frame with -1.
    Returns the hashes and a dictionary of everything needed
    to generate hashes from the genhash function.
'''
#hashes must have i8 available
    #otherwise, we'll have overflow
def avgdiff(d):
d=np.sort(d);
d = d[1:] - d[:-1]
ret = np.average(d[np.nonzero(d)]);
if np.isnan(ret):
return 1.0;
return ret;
def hasextent(l,eps=1e-10):
#will I one day make pic sims on the pm scale??
dim = frame['data'][l];
return np.abs(dim.max()-dim.min()) > eps;
fields = list(frame['data'].dtype.names);
dims = [ i for i in ['xi','yi','zi']
if i in fields and hasextent(i) ];
ip = np.array([ frame['data'][l]
for l in dims ]).T;
avgdiffs = np.array([avgdiff(a) for a in ip.T]);
mins = ip.min(axis=0);
ips = (((ip - mins)/avgdiffs).round().astype('uint64'))
pws = np.floor(np.log10(ips.max(axis=0))).astype('uint64')+1
pws = list(pws);
pw = [0]+[ ipw+jpw for ipw,jpw in
zip([0]+pws[:-1],pws[:-1]) ];
pw = 10**np.array(pw);#.astype('int64');
#the dictionary used for hashing
d=dict(dims=dims, mins=mins, avgdiffs=avgdiffs, pw=pw);
hashes = genhash(frame,removedupes=False,**d);
if removedupes:
#consider if the negation of this is faster for genhash
uni,counts = np.unique(hashes,return_counts=True);
d['dupes']=uni[counts>1]
dupei = np.in1d(hashes, d['dupes']);
hashes[dupei] = -1;
d['removedupes']=True;
return hashes,d
|
python
|
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pmovie.py#L14-L67
|
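A typical flow, assuming frames come from the read() that read_and_hash below relies on (the file name is illustrative):

frames = read('pmovie1.p4')
hashes, d = firsthash(frames[0], removedupes=True)
later = genhash(frames[-1], **d)  # hash a later frame with the same spec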
243,008
|
noobermin/lspreader
|
lspreader/pmovie.py
|
genhash
|
def genhash(frame,**kw):
'''
Generate the hashes for the given frame for a specification
given in the dictionary d returned from firsthash.
Parameters:
-----------
frame : frame to hash.
Keywords:
---------
d : hash specification generated from firsthash.
new : use new hashing, which isn't really hashing.
    removedupes : put -1 in duplicates.
    dims : specify dims. Supersedes the setting in `d'.
dupes : array of hashes known to be dupes.
ftype : type of floats. defaults to 'f'.
-- old keywords from old hashing --
mins : minima of each axis
    avgdiffs : average differences
pw : powers of each axis
Returns an array of the shape of the frames with hashes.
'''
getkw = mk_getkw(kw,genhash_defaults,prefer_passed=True);
dims = getkw('dims');
dupes= getkw('dupes');
if not getkw('new'):
ip = np.array([frame['data'][l] for l in dims]).T;
scaled = ((ip - getkw('mins'))/getkw('avgdiffs')).round().astype('int64');
hashes = (scaled*getkw('pw')).sum(axis=1).astype('int64');
else:
hashes = np.array([
struct.pack('{}{}'.format(len(dims),getkw('ftype')), *[p[l] for l in dims])
for p in frame['data']]);
if getkw('removedupes'):
#marking duplicated particles
if not getkw('dupes'):
hashes = np.unique(hashes);
else:
dupei = np.in1d(hashes, getkw('dupes'));
hashes[dupei] = -1
return hashes;
|
python
|
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pmovie.py#L87-L130
|
243,009
|
noobermin/lspreader
|
lspreader/pmovie.py
|
addhash
|
def addhash(frame,**kw):
'''
    Helper function to add hashes to the given frame using the
    specification given in the dictionary d returned from firsthash.
Parameters:
-----------
frame : frame to hash.
Keywords:
---------
same as genhash
Returns frame with added hashes, although it will be added in
place.
'''
hashes = genhash(frame,**kw);
frame['data'] = rfn.rec_append_fields(
frame['data'],'hash',hashes);
return frame;
|
python
|
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pmovie.py#L132-L151
|
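Sketch: tag every particle in place, reusing the spec dict d returned by firsthash above:

frame = addhash(frame, **d)
frame['data']['hash']  # new field appended via rfn.rec_append_fields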
243,010
|
noobermin/lspreader
|
lspreader/pmovie.py
|
sortframe
|
def sortframe(frame):
'''
sorts particles for a frame
'''
d = frame['data'];
sortedargs = np.lexsort([d['xi'],d['yi'],d['zi']])
d = d[sortedargs];
frame['data']=d;
return frame;
|
python
|
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pmovie.py#L153-L161
|
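Note that np.lexsort treats its last key as the primary one, so this sorts primarily by 'zi', then 'yi', then 'xi'. A self-contained check:

import numpy as np

d = np.array([(1.0, 0.0, 2.0), (0.0, 0.0, 1.0)],
             dtype=[('xi', 'f8'), ('yi', 'f8'), ('zi', 'f8')])
frame = sortframe({'data': d})
# zi is the primary key, so the row with zi=1.0 now comes first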
243,011
|
noobermin/lspreader
|
lspreader/pmovie.py
|
read_and_hash
|
def read_and_hash(fname, **kw):
'''
    Read and addhash each frame.
'''
return [addhash(frame, **kw) for frame in read(fname, **kw)];
|
python
|
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pmovie.py#L163-L167
|
243,012
|
noobermin/lspreader
|
lspreader/pmovie.py
|
filter_hashes_from_file
|
def filter_hashes_from_file(fname, f, **kw):
'''
    Obtain good hashes from a .p4 file using a function that
    returns good hashes. Any keywords will be
sent to read_and_hash.
Parameters:
-----------
fname -- filename of file.
f -- function that returns a list of good hashes.
'''
return np.concatenate([
frame['data']['hash'][f(frame)]
for frame in read_and_hash(fname, **kw)
]);
|
python
|
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pmovie.py#L169-L184
|
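Sketch: keep the hashes of particles passing a per-frame predicate; the 'KE' field and threshold are illustrative assumptions:

good = filter_hashes_from_file(
    'pmovie1.p4',
    lambda frame: frame['data']['KE'] > 1e-3,  # boolean mask over particles
    **d)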
243,013
|
cuescience/goat
|
goat/matcher.py
|
GoatMatcher.check_match
|
def check_match(self, step) -> list:
"""Like matchers.CFParseMatcher.check_match but
also add the implicit parameters from the context
"""
args = []
match = super().check_match(step)
if match is None:
return None
for arg in match:
args.append(model.Argument.from_argument(arg))
for arg in self.context_params:
args.append(model.Argument(0, 0, "", None, name=arg, implicit=True))
return args
|
python
|
d76f44b9ec5dc40ad33abca50830c0d7492ef152
|
https://github.com/cuescience/goat/blob/d76f44b9ec5dc40ad33abca50830c0d7492ef152/goat/matcher.py#L52-L67
|
243,014
|
cuescience/goat
|
goat/matcher.py
|
GoatMatcher.convert
|
def convert(self, pattern: str) -> str:
"""Convert the goat step string to CFParse String"""
parameters = OrderedDict()
for parameter in self.signature.parameters.values():
annotation = self.convert_type_to_parse_type(parameter)
parameters[parameter.name] = "{%s:%s}" % (parameter.name, annotation)
formatter = GoatFormatter()
# We have to use vformat here to ensure that kwargs will be OrderedDict
values = parameters.values()
parameter_list = list(values)
converted_pattern = formatter.vformat(pattern, parameter_list, parameters)
self.context_params = formatter.unused_args
return converted_pattern
|
python
|
d76f44b9ec5dc40ad33abca50830c0d7492ef152
|
https://github.com/cuescience/goat/blob/d76f44b9ec5dc40ad33abca50830c0d7492ef152/goat/matcher.py#L76-L91
|
243,015
|
BioMapOrg/biomap-utils
|
biomap/utils/xml.py
|
xml_to_json
|
def xml_to_json(root, tag_prefix=None, on_tag={}):
'''
    Parses an XML element to JSON format.
    This is a relatively generic function parsing an XML element
    to JSON format. It does not guarantee any specific formal
    behaviour but is empirically known to "work well" with respect
    to the author's needs. External verification of the returned
    results by the user is therefore instrumental.
    For bigger XML elements the whole procedure may take a while,
    so the philosophy should be to save the laboriously mapped
    JSON data structure to a file once you have it. This of course
    also means that this function is probably of little value
    when you have to constantly JSONify big XMLs. In summary,
    this function is mostly useful for one-time parsing of XML to
    JSON for subsequent use of the resulting JSON data instead of
    the XML-formatted data.
    Args:
        root: An XML element
tag_prefix: A tag prefix which will be cut from the keys
on_tag: User-defined parsing for elements identified by tag
Returns:
A Python data structure corresponding to the JSON mapping
of the supplied XML element
'''
def get_key(tag):
if tag_prefix is not None:
return tag.split(tag_prefix)[1]
return tag
def parse_element(elmt):
key = get_key(elmt.tag)
if key in on_tag:
return on_tag[key](elmt)
items = dict(elmt.items())
if len(elmt) == 0:
if items:
return { **items, **{key : elmt.text} }
else:
return elmt.text
else:
tags = {child.tag for child in elmt}
max_children = max({len(child) for child in elmt})
if len(tags) == 1:
value_list = [parse_element(child) for child in elmt]
if items:
return { **items, **{key : value_list} }
else:
return value_list
elif len(tags) > 1:
tag2children = {tag: [] for tag in tags}
for child in elmt:
tag2children[child.tag].append(child)
if max_children == 0:
value_dict = {get_key(tag) : [child.text for child in children] if len(children) > 1
else children[0].text
for tag, children in tag2children.items()}
else:
value_dict = {get_key(tag) : [parse_element(child) for child in children] if len(children) > 1
else parse_element(children[0])
for tag, children in tag2children.items()}
if items:
return { **items, **value_dict }
else:
return value_dict
# ---
return parse_element(root)
|
python
|
270c5c5fc6361094a5e2f93fc2ad84c2fe9b6ce5
|
https://github.com/BioMapOrg/biomap-utils/blob/270c5c5fc6361094a5e2f93fc2ad84c2fe9b6ce5/biomap/utils/xml.py#L1-L75
|
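Usage sketch with the standard-library parser:

import xml.etree.ElementTree as ET

root = ET.fromstring('<doc><a>1</a><a>2</a><b>3</b></doc>')
xml_to_json(root)
# -> {'a': ['1', '2'], 'b': '3'}: repeated tags collapse into a list,
#    leaf children collapse to their text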
243,016
|
mayfield/shellish
|
shellish/rendering/vtml.py
|
_textwrap_slices
|
def _textwrap_slices(text, width, strip_leading_indent=False):
""" Nearly identical to textwrap.wrap except this routine is a tad bit
safer in its algo that textwrap. I ran into some issues with textwrap
output that make it unusable to this usecase as a baseline text wrapper.
Further this utility returns slices instead of strings. So the slices
can be used to extract your lines manually. """
if not isinstance(text, str):
raise TypeError("Expected `str` type")
chunks = (x for x in _textwrap_word_break.split(text) if x)
remaining = width
buf = []
lines = [buf]
whitespace = []
whitespace_len = 0
pos = 0
try:
chunk = next(chunks)
except StopIteration:
chunk = ''
if not strip_leading_indent and is_whitespace(chunk):
        # Add leading indent for first line, but only up to one line's worth.
chunk_len = len(chunk)
if chunk_len >= width:
_add_slice(buf, slice(0, width))
buf = []
lines.append(buf)
else:
_add_slice(buf, slice(0, chunk_len))
remaining -= chunk_len
pos = chunk_len
try:
chunk = next(chunks)
except StopIteration:
chunk = ''
while True:
avail_len = remaining - whitespace_len
chunk_len = len(chunk)
if chunk == '\n':
buf = []
lines.append(buf)
whitespace = []
whitespace_len = 0
remaining = width
elif is_whitespace(chunk):
if buf:
_add_slice(whitespace, slice(pos, pos + chunk_len))
whitespace_len += chunk_len
elif len(chunk) > avail_len:
if not buf:
# Must hard split the chunk.
for x in whitespace:
_add_slice(buf, x)
_add_slice(buf, slice(pos, pos + avail_len))
chunk = chunk[avail_len:]
pos += avail_len
# Bump to next line without fetching the next chunk.
buf = []
lines.append(buf)
whitespace = []
whitespace_len = 0
remaining = width
continue
else:
if buf:
remaining -= whitespace_len
for x in whitespace:
_add_slice(buf, x)
whitespace = []
whitespace_len = 0
_add_slice(buf, slice(pos, pos + chunk_len))
remaining -= chunk_len
pos += chunk_len
try:
chunk = next(chunks)
except StopIteration:
break
return lines
|
python
|
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/rendering/vtml.py#L73-L149
|
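Sketch: materialize the wrapped lines from the returned slices:

text = 'the quick brown fox jumps over the lazy dog'
lines = _textwrap_slices(text, 10)
wrapped = [''.join(text[s] for s in line) for line in lines]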
243,017
|
mayfield/shellish
|
shellish/rendering/vtml.py
|
vtmlrender
|
def vtmlrender(vtmarkup, plain=None, strict=False, vtmlparser=VTMLParser()):
""" Look for vt100 markup and render vt opcodes into a VTMLBuffer. """
if isinstance(vtmarkup, VTMLBuffer):
return vtmarkup.plain() if plain else vtmarkup
try:
vtmlparser.feed(vtmarkup)
vtmlparser.close()
except:
if strict:
raise
buf = VTMLBuffer()
buf.append_str(str(vtmarkup))
return buf
else:
buf = vtmlparser.getvalue()
return buf.plain() if plain else buf
finally:
vtmlparser.reset()
|
python
|
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/rendering/vtml.py#L554-L571
|
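A hedged usage sketch for vtmlrender; the `<b>` markup is an assumed example of a VTML tag, which this record does not enumerate:

    styled = vtmlrender('<b>bold</b> text')             # VTMLBuffer with VT100 opcodes applied
    plain = vtmlrender('<b>bold</b> text', plain=True)  # same content, formatting stripped
    # If parsing fails, the input is returned unchanged as a plain VTMLBuffer
    # unless strict=True, in which case the original exception propagates.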
243,018
|
tBaxter/tango-shared-core
|
build/lib/tango_shared/utils/sanetize.py
|
clean_text
|
def clean_text(value, topic=False):
"""
Replaces "profane" words with more suitable ones.
Uses bleach to strip all but whitelisted html.
Converts bbcode to Markdown
"""
for x in PROFANITY_REPLACEMENTS:
value = value.replace(x[0], x[1])
for bbset in BBCODE_REPLACEMENTS:
p = re.compile(bbset[0], re.DOTALL)
value = p.sub(bbset[1], value)
bleached = bleach.clean(value, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True)
# We want to retain markdown quotes and we'll be running bleach again in format_post.
bleached = bleached.replace('&gt;', '>').replace('&amp;', '&')
return bleached
|
python
|
def clean_text(value, topic=False):
"""
Replaces "profane" words with more suitable ones.
Uses bleach to strip all but whitelisted html.
Converts bbcode to Markdown
"""
for x in PROFANITY_REPLACEMENTS:
value = value.replace(x[0], x[1])
for bbset in BBCODE_REPLACEMENTS:
p = re.compile(bbset[0], re.DOTALL)
value = p.sub(bbset[1], value)
bleached = bleach.clean(value, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True)
# We want to retain markdown quotes and we'll be running bleach again in format_post.
bleached = bleached.replace('&gt;', '>').replace('&amp;', '&')
return bleached
|
[
"def",
"clean_text",
"(",
"value",
",",
"topic",
"=",
"False",
")",
":",
"for",
"x",
"in",
"PROFANITY_REPLACEMENTS",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
")",
"for",
"bbset",
"in",
"BBCODE_REPLACEMENTS",
":",
"p",
"=",
"re",
".",
"compile",
"(",
"bbset",
"[",
"0",
"]",
",",
"re",
".",
"DOTALL",
")",
"value",
"=",
"p",
".",
"sub",
"(",
"bbset",
"[",
"1",
"]",
",",
"value",
")",
"bleached",
"=",
"bleach",
".",
"clean",
"(",
"value",
",",
"tags",
"=",
"ALLOWED_TAGS",
",",
"attributes",
"=",
"ALLOWED_ATTRIBUTES",
",",
"strip",
"=",
"True",
")",
"# We want to retain markdown quotes and we'll be running bleach again in format_post.",
"bleached",
"=",
"bleached",
".",
"replace",
"(",
"'>'",
",",
"'>'",
")",
".",
"replace",
"(",
"'&'",
",",
"'&'",
")",
"return",
"bleached"
] |
Replaces "profane" words with more suitable ones.
Uses bleach to strip all but whitelisted html.
Converts bbcode to Markdown
|
[
"Replaces",
"profane",
"words",
"with",
"more",
"suitable",
"ones",
".",
"Uses",
"bleach",
"to",
"strip",
"all",
"but",
"whitelisted",
"html",
".",
"Converts",
"bbcode",
"to",
"Markdown"
] |
35fc10aef1ceedcdb4d6d866d44a22efff718812
|
https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/utils/sanetize.py#L133-L150
|
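Why the trailing replace() calls exist: bleach escapes `>` and `&` in text, which would break Markdown blockquotes before the later format_post pass. A small sketch, assuming empty whitelists for brevity:

    import bleach

    raw = '> quoted & <em>text</em>'
    safe = bleach.clean(raw, tags=[], attributes={}, strip=True)
    # safe == '&gt; quoted &amp; text'  -- the blockquote marker is now escaped
    restored = safe.replace('&gt;', '>').replace('&amp;', '&')
    # restored == '> quoted & text'  -- Markdown quoting survives the sanitising pass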
243,019
|
tBaxter/tango-shared-core
|
build/lib/tango_shared/utils/sanetize.py
|
is_email_simple
|
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if '@' not in value or value.startswith('@') or value.endswith('@'):
return False
try:
p1, p2 = value.split('@')
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if '.' not in p2 or p2.startswith('.'):
return False
return True
|
python
|
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if '@' not in value or value.startswith('@') or value.endswith('@'):
return False
try:
p1, p2 = value.split('@')
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if '.' not in p2 or p2.startswith('.'):
return False
return True
|
[
"def",
"is_email_simple",
"(",
"value",
")",
":",
"# An @ must be in the middle of the value.",
"if",
"'@'",
"not",
"in",
"value",
"or",
"value",
".",
"startswith",
"(",
"'@'",
")",
"or",
"value",
".",
"endswith",
"(",
"'@'",
")",
":",
"return",
"False",
"try",
":",
"p1",
",",
"p2",
"=",
"value",
".",
"split",
"(",
"'@'",
")",
"except",
"ValueError",
":",
"# value contains more than one @.",
"return",
"False",
"# Dot must be in p2 (e.g. example.com)",
"if",
"'.'",
"not",
"in",
"p2",
"or",
"p2",
".",
"startswith",
"(",
"'.'",
")",
":",
"return",
"False",
"return",
"True"
] |
Return True if value looks like an email address.
|
[
"Return",
"True",
"if",
"value",
"looks",
"like",
"an",
"email",
"address",
"."
] |
35fc10aef1ceedcdb4d6d866d44a22efff718812
|
https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/utils/sanetize.py#L153-L166
|
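The logic above admits a quick table of accepted and rejected shapes; these checks follow directly from the code:

    assert is_email_simple('user@example.com')       # one @, dot in the domain
    assert not is_email_simple('@example.com')       # @ at the start
    assert not is_email_simple('user@')              # @ at the end
    assert not is_email_simple('a@b@c.com')          # more than one @ -> ValueError branch
    assert not is_email_simple('user@.com')          # domain starts with a dot
    assert not is_email_simple('user@example')       # no dot in the domain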
243,020
|
tBaxter/tango-shared-core
|
build/lib/tango_shared/utils/sanetize.py
|
convert_links
|
def convert_links(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Finds URLs in text and attempts to handle them correctly.
Heavily based on django.utils.html.urlize
With the additions of attempting to embed media links, particularly images.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
TO-DO: refactor to better leverage existing django.utils.html
"""
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
stripped = middle.rstrip(TRAILING_PUNCTUATION_CHARS)
if middle != stripped:
trail = middle[len(stripped):] + trail
middle = stripped
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
if simple_url_re.match(middle):
url = smart_urlquote(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % middle)
elif ':' not in middle and is_email_simple(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
if url:
u = url.lower()
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url = escape(url)
# Photos
if u.endswith('.jpg') or u.endswith('.gif') or u.endswith('.png'):
middle = '<img src="%s">' % url
# Youtube
#'https://www.youtube.com/watch?v=gkqXgaUuxZg'
elif 'youtube.com/watch' in url:
parsed = urlparse.urlsplit(url)
query = urlparse.parse_qs(parsed.query)
token = query.get('v')
if token and len(token) > 0:
middle = '<iframe src="http://www.youtube.com/embed/%s" height="320" width="100%%"></iframe>' % token[0]
else:
middle = url
elif 'youtu.be/' in url:
try:
token = url.rsplit('/', 1)[1]
middle = '<iframe src="http://www.youtube.com/embed/%s" height="320" width="100%%"></iframe>' % token
except IndexError:
middle = six.u(url)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
|
python
|
def convert_links(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Finds URLs in text and attempts to handle them correctly.
Heavily based on django.utils.html.urlize
With the additions of attempting to embed media links, particularly images.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
TO-DO: refactor to better leverage existing django.utils.html
"""
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
stripped = middle.rstrip(TRAILING_PUNCTUATION_CHARS)
if middle != stripped:
trail = middle[len(stripped):] + trail
middle = stripped
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
if simple_url_re.match(middle):
url = smart_urlquote(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % middle)
elif ':' not in middle and is_email_simple(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
if url:
u = url.lower()
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url = escape(url)
# Photos
if u.endswith('.jpg') or u.endswith('.gif') or u.endswith('.png'):
middle = '<img src="%s">' % url
# Youtube
#'https://www.youtube.com/watch?v=gkqXgaUuxZg'
elif 'youtube.com/watch' in url:
parsed = urlparse.urlsplit(url)
query = urlparse.parse_qs(parsed.query)
token = query.get('v')
if token and len(token) > 0:
middle = '<iframe src="http://www.youtube.com/embed/%s" height="320" width="100%%"></iframe>' % token[0]
else:
middle = url
elif 'youtu.be/' in url:
try:
token = url.rsplit('/', 1)[1]
middle = '<iframe src="http://www.youtube.com/embed/%s" height="320" width="100%%"></iframe>' % token
except IndexError:
middle = six.u(url)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
|
[
"def",
"convert_links",
"(",
"text",
",",
"trim_url_limit",
"=",
"None",
",",
"nofollow",
"=",
"False",
",",
"autoescape",
"=",
"False",
")",
":",
"safe_input",
"=",
"isinstance",
"(",
"text",
",",
"SafeData",
")",
"words",
"=",
"word_split_re",
".",
"split",
"(",
"force_text",
"(",
"text",
")",
")",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"words",
")",
":",
"if",
"'.'",
"in",
"word",
"or",
"':'",
"in",
"word",
":",
"# Deal with punctuation.",
"lead",
",",
"middle",
",",
"trail",
"=",
"''",
",",
"word",
",",
"''",
"stripped",
"=",
"middle",
".",
"rstrip",
"(",
"TRAILING_PUNCTUATION_CHARS",
")",
"if",
"middle",
"!=",
"stripped",
":",
"trail",
"=",
"middle",
"[",
"len",
"(",
"stripped",
")",
":",
"]",
"+",
"trail",
"middle",
"=",
"stripped",
"for",
"opening",
",",
"closing",
"in",
"WRAPPING_PUNCTUATION",
":",
"if",
"middle",
".",
"startswith",
"(",
"opening",
")",
":",
"middle",
"=",
"middle",
"[",
"len",
"(",
"opening",
")",
":",
"]",
"lead",
"=",
"lead",
"+",
"opening",
"# Keep parentheses at the end only if they're balanced.",
"if",
"(",
"middle",
".",
"endswith",
"(",
"closing",
")",
"and",
"middle",
".",
"count",
"(",
"closing",
")",
"==",
"middle",
".",
"count",
"(",
"opening",
")",
"+",
"1",
")",
":",
"middle",
"=",
"middle",
"[",
":",
"-",
"len",
"(",
"closing",
")",
"]",
"trail",
"=",
"closing",
"+",
"trail",
"# Make URL we want to point to.",
"url",
"=",
"None",
"if",
"simple_url_re",
".",
"match",
"(",
"middle",
")",
":",
"url",
"=",
"smart_urlquote",
"(",
"middle",
")",
"elif",
"simple_url_2_re",
".",
"match",
"(",
"middle",
")",
":",
"url",
"=",
"smart_urlquote",
"(",
"'http://%s'",
"%",
"middle",
")",
"elif",
"':'",
"not",
"in",
"middle",
"and",
"is_email_simple",
"(",
"middle",
")",
":",
"local",
",",
"domain",
"=",
"middle",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"try",
":",
"domain",
"=",
"domain",
".",
"encode",
"(",
"'idna'",
")",
".",
"decode",
"(",
"'ascii'",
")",
"except",
"UnicodeError",
":",
"continue",
"if",
"url",
":",
"u",
"=",
"url",
".",
"lower",
"(",
")",
"if",
"autoescape",
"and",
"not",
"safe_input",
":",
"lead",
",",
"trail",
"=",
"escape",
"(",
"lead",
")",
",",
"escape",
"(",
"trail",
")",
"url",
"=",
"escape",
"(",
"url",
")",
"# Photos",
"if",
"u",
".",
"endswith",
"(",
"'.jpg'",
")",
"or",
"u",
".",
"endswith",
"(",
"'.gif'",
")",
"or",
"u",
".",
"endswith",
"(",
"'.png'",
")",
":",
"middle",
"=",
"'<img src=\"%s\">'",
"%",
"url",
"# Youtube",
"#'https://www.youtube.com/watch?v=gkqXgaUuxZg'",
"elif",
"'youtube.com/watch'",
"in",
"url",
":",
"parsed",
"=",
"urlparse",
".",
"urlsplit",
"(",
"url",
")",
"query",
"=",
"urlparse",
".",
"parse_qs",
"(",
"parsed",
".",
"query",
")",
"token",
"=",
"query",
".",
"get",
"(",
"'v'",
")",
"if",
"token",
"and",
"len",
"(",
"token",
")",
">",
"0",
":",
"middle",
"=",
"'<iframe src=\"http://www.youtube.com/embed/%s\" height=\"320\" width=\"100%%\"></iframe>'",
"%",
"token",
"[",
"0",
"]",
"else",
":",
"middle",
"=",
"url",
"elif",
"'youtu.be/'",
"in",
"url",
":",
"try",
":",
"token",
"=",
"url",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"1",
"]",
"middle",
"=",
"'<iframe src=\"http://www.youtube.com/embed/%s\" height=\"320\" width=\"100%%\"></iframe>'",
"%",
"token",
"except",
"IndexError",
":",
"middle",
"=",
"six",
".",
"u",
"(",
"url",
")",
"words",
"[",
"i",
"]",
"=",
"mark_safe",
"(",
"'%s%s%s'",
"%",
"(",
"lead",
",",
"middle",
",",
"trail",
")",
")",
"else",
":",
"if",
"safe_input",
":",
"words",
"[",
"i",
"]",
"=",
"mark_safe",
"(",
"word",
")",
"elif",
"autoescape",
":",
"words",
"[",
"i",
"]",
"=",
"escape",
"(",
"word",
")",
"elif",
"safe_input",
":",
"words",
"[",
"i",
"]",
"=",
"mark_safe",
"(",
"word",
")",
"elif",
"autoescape",
":",
"words",
"[",
"i",
"]",
"=",
"escape",
"(",
"word",
")",
"return",
"''",
".",
"join",
"(",
"words",
")"
] |
Finds URLs in text and attempts to handle them correctly.
Heavily based on django.utils.html.urlize
With the additions of attempting to embed media links, particularly images.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
TO-DO: refactor to better leverage existing django.utils.html
|
[
"Finds",
"URLs",
"in",
"text",
"and",
"attempts",
"to",
"handle",
"correctly",
".",
"Heavily",
"based",
"on",
"django",
".",
"utils",
".",
"html",
".",
"urlize",
"With",
"the",
"additions",
"of",
"attempting",
"to",
"embed",
"media",
"links",
"particularly",
"images",
"."
] |
35fc10aef1ceedcdb4d6d866d44a22efff718812
|
https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/utils/sanetize.py#L169-L254
|
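A sketch of the expected rewrites; the regexes (word_split_re, simple_url_re, simple_url_2_re) live at module level in sanetize.py and are not shown in this record:

    html = convert_links('see http://example.com/cat.jpg and '
                         'https://www.youtube.com/watch?v=gkqXgaUuxZg')
    # http://example.com/cat.jpg -> <img src="http://example.com/cat.jpg">
    # the watch URL              -> <iframe src="http://www.youtube.com/embed/gkqXgaUuxZg"
    #                               height="320" width="100%"></iframe>
    # https://youtu.be/<token> URLs get the same iframe, token taken from the path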
243,021
|
mrstephenneal/dirutility
|
dirutility/multiprocess.py
|
pool_process
|
def pool_process(func, iterable, cpus=cpu_count(), return_vals=False, cpu_reduction=0, progress_bar=False):
"""
Multiprocessing helper function for performing looped operation using multiple processors.
:param func: Function to call
:param iterable: Iterable object to perform each function on
:param cpus: Number of cpu cores, defaults to system's cpu count
:param return_vals: Bool, returns output values when True
:param cpu_reduction: Number of cpu cores to not use
:param progress_bar: Display a text-based progress bar
:return:
"""
with Pool(cpus - abs(cpu_reduction)) as pool:
# Return values returned by 'func'
if return_vals:
# Show progress bar
if progress_bar:
vals = [v for v in tqdm(pool.imap_unordered(func, iterable), total=len(iterable))]
# No progress bar
else:
vals = pool.map(func, iterable)
# Close pool and return values
pool.close()
# pool.join()
return vals
# Don't capture values returned by 'func'
else:
pool.map(func, iterable)
pool.close()
return True
|
python
|
def pool_process(func, iterable, cpus=cpu_count(), return_vals=False, cpu_reduction=0, progress_bar=False):
"""
Multiprocessing helper function for performing looped operation using multiple processors.
:param func: Function to call
:param iterable: Iterable object to perform each function on
:param cpus: Number of cpu cores, defaults to system's cpu count
:param return_vals: Bool, returns output values when True
:param cpu_reduction: Number of cpu cores to not use
:param progress_bar: Display a text-based progress bar
:return:
"""
with Pool(cpus - abs(cpu_reduction)) as pool:
# Return values returned by 'func'
if return_vals:
# Show progress bar
if progress_bar:
vals = [v for v in tqdm(pool.imap_unordered(func, iterable), total=len(iterable))]
# No progress bar
else:
vals = pool.map(func, iterable)
# Close pool and return values
pool.close()
# pool.join()
return vals
# Don't capture values returned by 'func'
else:
pool.map(func, iterable)
pool.close()
return True
|
[
"def",
"pool_process",
"(",
"func",
",",
"iterable",
",",
"cpus",
"=",
"cpu_count",
"(",
")",
",",
"return_vals",
"=",
"False",
",",
"cpu_reduction",
"=",
"0",
",",
"progress_bar",
"=",
"False",
")",
":",
"with",
"Pool",
"(",
"cpus",
"-",
"abs",
"(",
"cpu_reduction",
")",
")",
"as",
"pool",
":",
"# Return values returned by 'func'",
"if",
"return_vals",
":",
"# Show progress bar",
"if",
"progress_bar",
":",
"vals",
"=",
"[",
"v",
"for",
"v",
"in",
"tqdm",
"(",
"pool",
".",
"imap_unordered",
"(",
"func",
",",
"iterable",
")",
",",
"total",
"=",
"len",
"(",
"iterable",
")",
")",
"]",
"# No progress bar",
"else",
":",
"vals",
"=",
"pool",
".",
"map",
"(",
"func",
",",
"iterable",
")",
"# Close pool and return values",
"pool",
".",
"close",
"(",
")",
"# pool.join()",
"return",
"vals",
"# Don't capture values returned by 'func'",
"else",
":",
"pool",
".",
"map",
"(",
"func",
",",
"iterable",
")",
"pool",
".",
"close",
"(",
")",
"return",
"True"
] |
Multiprocessing helper function for performing looped operation using multiple processors.
:param func: Function to call
:param iterable: Iterable object to perform each function on
:param cpus: Number of cpu cores, defaults to system's cpu count
:param return_vals: Bool, returns output values when True
:param cpu_reduction: Number of cpu cores to not use
:param progress_bar: Display a text-based progress bar
:return:
|
[
"Multiprocessing",
"helper",
"function",
"for",
"performing",
"looped",
"operation",
"using",
"multiple",
"processors",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/multiprocess.py#L6-L38
|
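A usage sketch for pool_process; the math.sqrt worker is illustrative. The __main__ guard matters because multiprocessing re-imports the calling module on spawn-based platforms:

    from math import sqrt

    if __name__ == '__main__':
        # Leave one core unused and collect results with a progress bar.
        # Note: progress_bar=True goes through imap_unordered, so the
        # returned values are not guaranteed to be in input order.
        roots = pool_process(sqrt, range(10000), return_vals=True,
                             cpu_reduction=1, progress_bar=True)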
243,022
|
mrstephenneal/dirutility
|
dirutility/multiprocess.py
|
PoolProcess.map
|
def map(self):
"""Perform a function on every item in an iterable."""
with Pool(self.cpu_count) as pool:
pool.map(self._func, self._iterable)
pool.close()
return True
|
python
|
def map(self):
"""Perform a function on every item in an iterable."""
with Pool(self.cpu_count) as pool:
pool.map(self._func, self._iterable)
pool.close()
return True
|
[
"def",
"map",
"(",
"self",
")",
":",
"with",
"Pool",
"(",
"self",
".",
"cpu_count",
")",
"as",
"pool",
":",
"pool",
".",
"map",
"(",
"self",
".",
"_func",
",",
"self",
".",
"_iterable",
")",
"pool",
".",
"close",
"(",
")",
"return",
"True"
] |
Perform a function on every item in an iterable.
|
[
"Perform",
"a",
"function",
"on",
"every",
"item",
"in",
"an",
"iterable",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/multiprocess.py#L58-L63
|
243,023
|
mrstephenneal/dirutility
|
dirutility/multiprocess.py
|
PoolProcess.map_return
|
def map_return(self):
"""Perform a function on every item and return a list of yield values."""
with Pool(self.cpu_count) as pool:
vals = pool.map(self._func, self._iterable)
pool.close()
return vals
|
python
|
def map_return(self):
"""Perform a function on every item and return a list of yield values."""
with Pool(self.cpu_count) as pool:
vals = pool.map(self._func, self._iterable)
pool.close()
return vals
|
[
"def",
"map_return",
"(",
"self",
")",
":",
"with",
"Pool",
"(",
"self",
".",
"cpu_count",
")",
"as",
"pool",
":",
"vals",
"=",
"pool",
".",
"map",
"(",
"self",
".",
"_func",
",",
"self",
".",
"_iterable",
")",
"pool",
".",
"close",
"(",
")",
"return",
"vals"
] |
Perform a function on every item and return a list of yield values.
|
[
"Perform",
"a",
"function",
"on",
"every",
"item",
"and",
"return",
"a",
"list",
"of",
"yield",
"values",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/multiprocess.py#L65-L70
|
243,024
|
mrstephenneal/dirutility
|
dirutility/multiprocess.py
|
PoolProcess.map_tqdm
|
def map_tqdm(self):
"""
Perform a function on every item while displaying a progress bar.
:return: A list of yielded values
"""
with Pool(self.cpu_count) as pool:
vals = [v for v in tqdm(pool.imap_unordered(self._func, self._iterable), total=len(self._iterable))]
pool.close()
return vals
|
python
|
def map_tqdm(self):
"""
Perform a function on every item while displaying a progress bar.
:return: A list of yielded values
"""
with Pool(self.cpu_count) as pool:
vals = [v for v in tqdm(pool.imap_unordered(self._func, self._iterable), total=len(self._iterable))]
pool.close()
return vals
|
[
"def",
"map_tqdm",
"(",
"self",
")",
":",
"with",
"Pool",
"(",
"self",
".",
"cpu_count",
")",
"as",
"pool",
":",
"vals",
"=",
"[",
"v",
"for",
"v",
"in",
"tqdm",
"(",
"pool",
".",
"imap_unordered",
"(",
"self",
".",
"_func",
",",
"self",
".",
"_iterable",
")",
",",
"total",
"=",
"len",
"(",
"self",
".",
"_iterable",
")",
")",
"]",
"pool",
".",
"close",
"(",
")",
"return",
"vals"
] |
Perform a function on every item while displaying a progress bar.
:return: A list of yielded values
|
[
"Perform",
"a",
"function",
"on",
"every",
"item",
"while",
"displaying",
"a",
"progress",
"bar",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/multiprocess.py#L72-L81
|
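Taken together, the three methods trade ordering for feedback; a sketch assuming a PoolProcess(func, iterable) constructor, which these records do not show:

    pp = PoolProcess(process_item, items)  # hypothetical constructor signature
    pp.map()                   # side effects only; returns True
    ordered = pp.map_return()  # pool.map preserves input order
    with_bar = pp.map_tqdm()   # imap_unordered + tqdm: progress bar, arbitrary order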
243,025
|
tshlabs/tunic
|
tunic/core.py
|
split_by_line
|
def split_by_line(content):
"""Split the given content into a list of items by newline.
Both \r\n and \n are supported. This is done since it seems
that TTY devices on POSIX systems use \r\n for newlines in
some instances.
If the given content is an empty string or a string of only
whitespace, an empty list will be returned. If the given
content does not contain any newlines, it will be returned
as the only element in a single item list.
Leading and trailing whitespace is removed from all elements
returned.
:param str content: Content to split by newlines
:return: List of items that were separated by newlines.
:rtype: list
"""
# Make sure we don't end up splitting a string with
# just a single trailing \n or \r\n into multiple parts.
stripped = content.strip()
if not stripped:
return []
if '\r\n' in stripped:
return _strip_all(stripped.split('\r\n'))
if '\n' in stripped:
return _strip_all(stripped.split('\n'))
return _strip_all([stripped])
|
python
|
def split_by_line(content):
"""Split the given content into a list of items by newline.
Both \r\n and \n are supported. This is done since it seems
that TTY devices on POSIX systems use \r\n for newlines in
some instances.
If the given content is an empty string or a string of only
whitespace, an empty list will be returned. If the given
content does not contain any newlines, it will be returned
as the only element in a single item list.
Leading and trailing whitespace is removed from all elements
returned.
:param str content: Content to split by newlines
:return: List of items that were separated by newlines.
:rtype: list
"""
# Make sure we don't end up splitting a string with
# just a single trailing \n or \r\n into multiple parts.
stripped = content.strip()
if not stripped:
return []
if '\r\n' in stripped:
return _strip_all(stripped.split('\r\n'))
if '\n' in stripped:
return _strip_all(stripped.split('\n'))
return _strip_all([stripped])
|
[
"def",
"split_by_line",
"(",
"content",
")",
":",
"# Make sure we don't end up splitting a string with",
"# just a single trailing \\n or \\r\\n into multiple parts.",
"stripped",
"=",
"content",
".",
"strip",
"(",
")",
"if",
"not",
"stripped",
":",
"return",
"[",
"]",
"if",
"'\\r\\n'",
"in",
"stripped",
":",
"return",
"_strip_all",
"(",
"stripped",
".",
"split",
"(",
"'\\r\\n'",
")",
")",
"if",
"'\\n'",
"in",
"stripped",
":",
"return",
"_strip_all",
"(",
"stripped",
".",
"split",
"(",
"'\\n'",
")",
")",
"return",
"_strip_all",
"(",
"[",
"stripped",
"]",
")"
] |
Split the given content into a list of items by newline.
Both \r\n and \n are supported. This is done since it seems
that TTY devices on POSIX systems use \r\n for newlines in
some instances.
If the given content is an empty string or a string of only
whitespace, an empty list will be returned. If the given
content does not contain any newlines, it will be returned
as the only element in a single item list.
Leading and trailing whitespace is removed from all elements
returned.
:param str content: Content to split by newlines
:return: List of items that were separated by newlines.
:rtype: list
|
[
"Split",
"the",
"given",
"content",
"into",
"a",
"list",
"of",
"items",
"by",
"newline",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L67-L96
|
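The documented edge cases translate directly into checks:

    assert split_by_line('') == []
    assert split_by_line('  \n ') == []                       # whitespace only
    assert split_by_line('one\r\ntwo\r\n') == ['one', 'two']  # TTY-style CRLF newlines
    assert split_by_line(' solo ') == ['solo']                # no newline: single-item list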
243,026
|
tshlabs/tunic
|
tunic/core.py
|
get_release_id
|
def get_release_id(version=None):
"""Get a unique, time-based identifier for a deployment
that optionally, also includes some sort of version number
or release.
If a version is supplied, the release ID will be of the form
'$timestamp-$version'. For example:
>>> get_release_id(version='1.4.1')
'20140214231159-1.4.1'
If the version is not supplied the release ID will be of the
form '$timestamp'. For example:
>>> get_release_id()
'20140214231159'
The timestamp component of this release ID will be generated
using the current time in UTC.
:param str version: Version to include in the release ID
:return: Unique name for this particular deployment
:rtype: str
"""
# pylint: disable=invalid-name
ts = datetime.utcnow().strftime(RELEASE_DATE_FMT)
if version is None:
return ts
return '{0}-{1}'.format(ts, version)
|
python
|
def get_release_id(version=None):
"""Get a unique, time-based identifier for a deployment
that optionally, also includes some sort of version number
or release.
If a version is supplied, the release ID will be of the form
'$timestamp-$version'. For example:
>>> get_release_id(version='1.4.1')
'20140214231159-1.4.1'
If the version is not supplied the release ID will be of the
form '$timestamp'. For example:
>>> get_release_id()
'20140214231159'
The timestamp component of this release ID will be generated
using the current time in UTC.
:param str version: Version to include in the release ID
:return: Unique name for this particular deployment
:rtype: str
"""
# pylint: disable=invalid-name
ts = datetime.utcnow().strftime(RELEASE_DATE_FMT)
if version is None:
return ts
return '{0}-{1}'.format(ts, version)
|
[
"def",
"get_release_id",
"(",
"version",
"=",
"None",
")",
":",
"# pylint: disable=invalid-name",
"ts",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"RELEASE_DATE_FMT",
")",
"if",
"version",
"is",
"None",
":",
"return",
"ts",
"return",
"'{0}-{1}'",
".",
"format",
"(",
"ts",
",",
"version",
")"
] |
Get a unique, time-based identifier for a deployment
that optionally, also includes some sort of version number
or release.
If a version is supplied, the release ID will be of the form
'$timestamp-$version'. For example:
>>> get_release_id(version='1.4.1')
'20140214231159-1.4.1'
If the version is not supplied the release ID will be of the
form '$timestamp'. For example:
>>> get_release_id()
'20140214231159'
The timestamp component of this release ID will be generated
using the current time in UTC.
:param str version: Version to include in the release ID
:return: Unique name for this particular deployment
:rtype: str
|
[
"Get",
"a",
"unique",
"time",
"-",
"based",
"identifier",
"for",
"a",
"deployment",
"that",
"optionally",
"also",
"includes",
"some",
"sort",
"of",
"version",
"number",
"or",
"release",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L147-L176
|
243,027
|
tshlabs/tunic
|
tunic/core.py
|
try_repeatedly
|
def try_repeatedly(method, max_retries=None, delay=None):
"""Execute the given Fabric call, retrying up to a certain number of times.
The method is expected to be a wrapper around a Fabric :func:`run` or :func:`sudo`
call that returns the results of that call. The call will be executed at least
once, and up to :code:`max_retries` additional times until the call executes
without failing.
Optionally, a delay in seconds can be specified in between successive calls.
:param callable method: Wrapped Fabric method to execute
:param int max_retries: Max number of times to retry execution after a failed call
:param float delay: Number of seconds between successive calls of :code:`method`
:return: The results of running :code:`method`
"""
max_retries = max_retries if max_retries is not None else 1
delay = delay if delay is not None else 0
tries = 0
with warn_only():
while tries < max_retries:
res = method()
if not res.failed:
return res
tries += 1
time.sleep(delay)
# final try outside the warn_only block so that if it
# fails it'll just blow up or do whatever it was going to
# do anyway.
return method()
|
python
|
def try_repeatedly(method, max_retries=None, delay=None):
"""Execute the given Fabric call, retrying up to a certain number of times.
The method is expected to be a wrapper around a Fabric :func:`run` or :func:`sudo`
call that returns the results of that call. The call will be executed at least
once, and up to :code:`max_retries` additional times until the call executes
without failing.
Optionally, a delay in seconds can be specified in between successive calls.
:param callable method: Wrapped Fabric method to execute
:param int max_retries: Max number of times to retry execution after a failed call
:param float delay: Number of seconds between successive calls of :code:`method`
:return: The results of running :code:`method`
"""
max_retries = max_retries if max_retries is not None else 1
delay = delay if delay is not None else 0
tries = 0
with warn_only():
while tries < max_retries:
res = method()
if not res.failed:
return res
tries += 1
time.sleep(delay)
# final try outside the warn_only block so that if it
# fails it'll just blow up or do whatever it was going to
# do anyway.
return method()
|
[
"def",
"try_repeatedly",
"(",
"method",
",",
"max_retries",
"=",
"None",
",",
"delay",
"=",
"None",
")",
":",
"max_retries",
"=",
"max_retries",
"if",
"max_retries",
"is",
"not",
"None",
"else",
"1",
"delay",
"=",
"delay",
"if",
"delay",
"is",
"not",
"None",
"else",
"0",
"tries",
"=",
"0",
"with",
"warn_only",
"(",
")",
":",
"while",
"tries",
"<",
"max_retries",
":",
"res",
"=",
"method",
"(",
")",
"if",
"not",
"res",
".",
"failed",
":",
"return",
"res",
"tries",
"+=",
"1",
"time",
".",
"sleep",
"(",
"delay",
")",
"# final try outside the warn_only block so that if it",
"# fails it'll just blow up or do whatever it was going to",
"# do anyway.",
"return",
"method",
"(",
")"
] |
Execute the given Fabric call, retrying up to a certain number of times.
The method is expected to be a wrapper around a Fabric :func:`run` or :func:`sudo`
call that returns the results of that call. The call will be executed at least
once, and up to :code:`max_retries` additional times until the call executes
without failing.
Optionally, a delay in seconds can be specified in between successive calls.
:param callable method: Wrapped Fabric method to execute
:param int max_retries: Max number of times to retry execution after a failed call
:param float delay: Number of seconds between successive calls of :code:`method`
:return: The results of running :code:`method`
|
[
"Execute",
"the",
"given",
"Fabric",
"call",
"retrying",
"up",
"to",
"a",
"certain",
"number",
"of",
"times",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L219-L249
|
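A usage sketch; fabric.api.run is the assumed import for Fabric 1.x, and the command is illustrative. Counting the loop plus the final unguarded call, the method runs at most max_retries + 1 times:

    from fabric.api import run  # Fabric 1.x style import (assumed)

    result = try_repeatedly(lambda: run('systemctl reload nginx'),
                            max_retries=3, delay=2.0)
    # Up to 3 quiet attempts inside warn_only(), then one final attempt
    # that fails loudly if the command still does not succeed.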
243,028
|
tshlabs/tunic
|
tunic/core.py
|
ReleaseManager.get_current_release
|
def get_current_release(self):
"""Get the release ID of the "current" deployment, None if
there is no current deployment.
This method performs one network operation.
:return: The current release ID
:rtype: str
"""
current = self._runner.run("readlink '{0}'".format(self._current))
if current.failed:
return None
return os.path.basename(current.strip())
|
python
|
def get_current_release(self):
"""Get the release ID of the "current" deployment, None if
there is no current deployment.
This method performs one network operation.
:return: The current release ID
:rtype: str
"""
current = self._runner.run("readlink '{0}'".format(self._current))
if current.failed:
return None
return os.path.basename(current.strip())
|
[
"def",
"get_current_release",
"(",
"self",
")",
":",
"current",
"=",
"self",
".",
"_runner",
".",
"run",
"(",
"\"readlink '{0}'\"",
".",
"format",
"(",
"self",
".",
"_current",
")",
")",
"if",
"current",
".",
"failed",
":",
"return",
"None",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"current",
".",
"strip",
"(",
")",
")"
] |
Get the release ID of the "current" deployment, None if
there is no current deployment.
This method performs one network operation.
:return: The current release ID
:rtype: str
|
[
"Get",
"the",
"release",
"ID",
"of",
"the",
"current",
"deployment",
"None",
"if",
"there",
"is",
"no",
"current",
"deployment",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L304-L316
|
243,029
|
tshlabs/tunic
|
tunic/core.py
|
ReleaseManager.get_previous_release
|
def get_previous_release(self):
"""Get the release ID of the deployment immediately
before the "current" deployment, ``None`` if no previous
release could be determined.
This method performs two network operations.
:return: The release ID of the release previous to the
"current" release.
:rtype: str
"""
releases = self.get_releases()
if not releases:
return None
current = self.get_current_release()
if not current:
return None
try:
current_idx = releases.index(current)
except ValueError:
return None
try:
return releases[current_idx + 1]
except IndexError:
return None
|
python
|
def get_previous_release(self):
"""Get the release ID of the deployment immediately
before the "current" deployment, ``None`` if no previous
release could be determined.
This method performs two network operations.
:return: The release ID of the release previous to the
"current" release.
:rtype: str
"""
releases = self.get_releases()
if not releases:
return None
current = self.get_current_release()
if not current:
return None
try:
current_idx = releases.index(current)
except ValueError:
return None
try:
return releases[current_idx + 1]
except IndexError:
return None
|
[
"def",
"get_previous_release",
"(",
"self",
")",
":",
"releases",
"=",
"self",
".",
"get_releases",
"(",
")",
"if",
"not",
"releases",
":",
"return",
"None",
"current",
"=",
"self",
".",
"get_current_release",
"(",
")",
"if",
"not",
"current",
":",
"return",
"None",
"try",
":",
"current_idx",
"=",
"releases",
".",
"index",
"(",
"current",
")",
"except",
"ValueError",
":",
"return",
"None",
"try",
":",
"return",
"releases",
"[",
"current_idx",
"+",
"1",
"]",
"except",
"IndexError",
":",
"return",
"None"
] |
Get the release ID of the deployment immediately
before the "current" deployment, ``None`` if no previous
release could be determined.
This method performs two network operations.
:return: The release ID of the release previous to the
"current" release.
:rtype: str
|
[
"Get",
"the",
"release",
"ID",
"of",
"the",
"deployment",
"immediately",
"before",
"the",
"current",
"deployment",
"None",
"if",
"no",
"previous",
"release",
"could",
"be",
"determined",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L329-L356
|
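The index arithmetic assumes get_releases() returns IDs newest-first; a pure-logic illustration:

    releases = ['20150102120000', '20141224083000', '20141201090000']  # newest first (assumed)
    current = '20141224083000'
    previous = releases[releases.index(current) + 1]  # '20141201090000'
    # A missing current release or an out-of-range index maps to None in the method.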
243,030
|
tshlabs/tunic
|
tunic/core.py
|
ReleaseManager.cleanup
|
def cleanup(self, keep=5):
"""Remove all but the ``keep`` most recent releases.
If any of the candidates for deletion are pointed to by the
'current' symlink, they will not be deleted.
This method performs N + 2 network operations where N is the
number of old releases that are cleaned up.
:param int keep: Number of old releases to keep around
"""
releases = self.get_releases()
current_version = self.get_current_release()
to_delete = [version for version in releases[keep:] if version != current_version]
for release in to_delete:
self._runner.run("rm -rf '{0}'".format(os.path.join(self._releases, release)))
|
python
|
def cleanup(self, keep=5):
"""Remove all but the ``keep`` most recent releases.
If any of the candidates for deletion are pointed to by the
'current' symlink, they will not be deleted.
This method performs N + 2 network operations where N is the
number of old releases that are cleaned up.
:param int keep: Number of old releases to keep around
"""
releases = self.get_releases()
current_version = self.get_current_release()
to_delete = [version for version in releases[keep:] if version != current_version]
for release in to_delete:
self._runner.run("rm -rf '{0}'".format(os.path.join(self._releases, release)))
|
[
"def",
"cleanup",
"(",
"self",
",",
"keep",
"=",
"5",
")",
":",
"releases",
"=",
"self",
".",
"get_releases",
"(",
")",
"current_version",
"=",
"self",
".",
"get_current_release",
"(",
")",
"to_delete",
"=",
"[",
"version",
"for",
"version",
"in",
"releases",
"[",
"keep",
":",
"]",
"if",
"version",
"!=",
"current_version",
"]",
"for",
"release",
"in",
"to_delete",
":",
"self",
".",
"_runner",
".",
"run",
"(",
"\"rm -rf '{0}'\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_releases",
",",
"release",
")",
")",
")"
] |
Remove all but the ``keep`` most recent releases.
If any of the candidates for deletion are pointed to by the
'current' symlink, they will not be deleted.
This method performs N + 2 network operations where N is the
number of old releases that are cleaned up.
:param int keep: Number of old releases to keep around
|
[
"Remove",
"all",
"but",
"the",
"keep",
"most",
"recent",
"releases",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L387-L403
|
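The keep semantics, spelled out with the same newest-first ordering assumption:

    releases = ['r7', 'r6', 'r5', 'r4', 'r3', 'r2', 'r1']  # newest first (assumed)
    current = 'r1'
    to_delete = [r for r in releases[5:] if r != current]  # keep=5
    # to_delete == ['r2']; 'r1' survives because the 'current' symlink points at it.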
243,031
|
tshlabs/tunic
|
tunic/core.py
|
ProjectSetup.setup_directories
|
def setup_directories(self, use_sudo=True):
"""Create the minimal required directories for deploying multiple
releases of a project.
By default, creation of directories is done with the Fabric
``sudo`` function but can optionally use the ``run`` function.
This method performs one network operation.
:param bool use_sudo: If ``True``, use ``sudo()`` to create required
directories. If ``False`` try to create directories using the
``run()`` command.
"""
runner = self._runner.sudo if use_sudo else self._runner.run
runner("mkdir -p '{0}'".format(self._releases))
|
python
|
def setup_directories(self, use_sudo=True):
"""Create the minimal required directories for deploying multiple
releases of a project.
By default, creation of directories is done with the Fabric
``sudo`` function but can optionally use the ``run`` function.
This method performs one network operation.
:param bool use_sudo: If ``True``, use ``sudo()`` to create required
directories. If ``False`` try to create directories using the
``run()`` command.
"""
runner = self._runner.sudo if use_sudo else self._runner.run
runner("mkdir -p '{0}'".format(self._releases))
|
[
"def",
"setup_directories",
"(",
"self",
",",
"use_sudo",
"=",
"True",
")",
":",
"runner",
"=",
"self",
".",
"_runner",
".",
"sudo",
"if",
"use_sudo",
"else",
"self",
".",
"_runner",
".",
"run",
"runner",
"(",
"\"mkdir -p '{0}'\"",
".",
"format",
"(",
"self",
".",
"_releases",
")",
")"
] |
Create the minimal required directories for deploying multiple
releases of a project.
By default, creation of directories is done with the Fabric
``sudo`` function but can optionally use the ``run`` function.
This method performs one network operation.
:param bool use_sudo: If ``True``, use ``sudo()`` to create required
directories. If ``False`` try to create directories using the
``run()`` command.
|
[
"Create",
"the",
"minimal",
"required",
"directories",
"for",
"deploying",
"multiple",
"releases",
"of",
"a",
"project",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L436-L450
|
243,032
|
tshlabs/tunic
|
tunic/core.py
|
ProjectSetup.set_permissions
|
def set_permissions(
self, owner, file_perms=PERMS_FILE_DEFAULT,
dir_perms=PERMS_DIR_DEFAULT, use_sudo=True):
"""Set the owner and permissions of the code deploy.
The owner will be set recursively for the entire code deploy.
The directory permissions will be set on only the base of the
code deploy and the releases directory. The file permissions
will be set recursively for the entire code deploy.
If not specified default values will be used for file or directory
permissions.
By default the Fabric ``sudo`` function will be used for changing
the owner and permissions of the code deploy. Optionally, you can
pass the ``use_sudo=False`` argument to skip trying to change the
owner of the code deploy and to use the ``run`` function to change
permissions.
This method performs between three and four network operations
depending on whether ``use_sudo`` is false or true, respectively.
:param str owner: User and group in the form 'owner:group' to
set for the code deploy.
:param str file_perms: Permissions to set for all files in the
code deploy in the form 'u+perms,g+perms,o+perms'. Default
is ``u+rw,g+rw,o+r``.
:param str dir_perms: Permissions to set for the base and releases
directories in the form 'u+perms,g+perms,o+perms'. Default
is ``u+rwx,g+rws,o+rx``.
:param bool use_sudo: If ``True``, use ``sudo()`` to change ownership
and permissions of the code deploy. If ``False`` try to change
permissions using the ``run()`` command, do not change ownership.
.. versionchanged:: 0.2.0
``use_sudo=False`` will no longer attempt to change ownership of
the code deploy since this will just be a no-op or fail.
"""
runner = self._runner.sudo if use_sudo else self._runner.run
if use_sudo:
runner("chown -R '{0}' '{1}'".format(owner, self._base))
for path in (self._base, self._releases):
runner("chmod '{0}' '{1}'".format(dir_perms, path))
runner("chmod -R '{0}' '{1}'".format(file_perms, self._base))
|
python
|
def set_permissions(
self, owner, file_perms=PERMS_FILE_DEFAULT,
dir_perms=PERMS_DIR_DEFAULT, use_sudo=True):
"""Set the owner and permissions of the code deploy.
The owner will be set recursively for the entire code deploy.
The directory permissions will be set on only the base of the
code deploy and the releases directory. The file permissions
will be set recursively for the entire code deploy.
If not specified default values will be used for file or directory
permissions.
By default the Fabric ``sudo`` function will be used for changing
the owner and permissions of the code deploy. Optionally, you can
pass the ``use_sudo=False`` argument to skip trying to change the
owner of the code deploy and to use the ``run`` function to change
permissions.
This method performs between three and four network operations
depending on whether ``use_sudo`` is false or true, respectively.
:param str owner: User and group in the form 'owner:group' to
set for the code deploy.
:param str file_perms: Permissions to set for all files in the
code deploy in the form 'u+perms,g+perms,o+perms'. Default
is ``u+rw,g+rw,o+r``.
:param str dir_perms: Permissions to set for the base and releases
directories in the form 'u+perms,g+perms,o+perms'. Default
is ``u+rwx,g+rws,o+rx``.
:param bool use_sudo: If ``True``, use ``sudo()`` to change ownership
and permissions of the code deploy. If ``False`` try to change
permissions using the ``run()`` command, do not change ownership.
.. versionchanged:: 0.2.0
``use_sudo=False`` will no longer attempt to change ownership of
the code deploy since this will just be a no-op or fail.
"""
runner = self._runner.sudo if use_sudo else self._runner.run
if use_sudo:
runner("chown -R '{0}' '{1}'".format(owner, self._base))
for path in (self._base, self._releases):
runner("chmod '{0}' '{1}'".format(dir_perms, path))
runner("chmod -R '{0}' '{1}'".format(file_perms, self._base))
|
[
"def",
"set_permissions",
"(",
"self",
",",
"owner",
",",
"file_perms",
"=",
"PERMS_FILE_DEFAULT",
",",
"dir_perms",
"=",
"PERMS_DIR_DEFAULT",
",",
"use_sudo",
"=",
"True",
")",
":",
"runner",
"=",
"self",
".",
"_runner",
".",
"sudo",
"if",
"use_sudo",
"else",
"self",
".",
"_runner",
".",
"run",
"if",
"use_sudo",
":",
"runner",
"(",
"\"chown -R '{0}' '{1}'\"",
".",
"format",
"(",
"owner",
",",
"self",
".",
"_base",
")",
")",
"for",
"path",
"in",
"(",
"self",
".",
"_base",
",",
"self",
".",
"_releases",
")",
":",
"runner",
"(",
"\"chmod '{0}' '{1}'\"",
".",
"format",
"(",
"dir_perms",
",",
"path",
")",
")",
"runner",
"(",
"\"chmod -R '{0}' '{1}'\"",
".",
"format",
"(",
"file_perms",
",",
"self",
".",
"_base",
")",
")"
] |
Set the owner and permissions of the code deploy.
The owner will be set recursively for the entire code deploy.
The directory permissions will be set on only the base of the
code deploy and the releases directory. The file permissions
will be set recursively for the entire code deploy.
If not specified default values will be used for file or directory
permissions.
By default the Fabric ``sudo`` function will be used for changing
the owner and permissions of the code deploy. Optionally, you can
pass the ``use_sudo=False`` argument to skip trying to change the
owner of the code deploy and to use the ``run`` function to change
permissions.
This method performs between three and four network operations
depending on whether ``use_sudo`` is false or true, respectively.
:param str owner: User and group in the form 'owner:group' to
set for the code deploy.
:param str file_perms: Permissions to set for all files in the
code deploy in the form 'u+perms,g+perms,o+perms'. Default
is ``u+rw,g+rw,o+r``.
:param str dir_perms: Permissions to set for the base and releases
directories in the form 'u+perms,g+perms,o+perms'. Default
is ``u+rwx,g+rws,o+rx``.
:param bool use_sudo: If ``True``, use ``sudo()`` to change ownership
and permissions of the code deploy. If ``False`` try to change
permissions using the ``run()`` command, do not change ownership.
.. versionchanged:: 0.2.0
``use_sudo=False`` will no longer attempt to change ownership of
the code deploy since this will just be a no-op or fail.
|
[
"Set",
"the",
"owner",
"and",
"permissions",
"of",
"the",
"code",
"deploy",
"."
] |
621f4398d59a9c9eb8dd602beadff11616048aa0
|
https://github.com/tshlabs/tunic/blob/621f4398d59a9c9eb8dd602beadff11616048aa0/tunic/core.py#L452-L500
|
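With the defaults, the method issues roughly these commands; the base path and owner are placeholders, and the releases directory is assumed to live under the base:

    # set_permissions('deploy:www-data') over a base of '/srv/app' expands to roughly:
    #   sudo chown -R 'deploy:www-data' '/srv/app'
    #   sudo chmod 'u+rwx,g+rws,o+rx' '/srv/app'
    #   sudo chmod 'u+rwx,g+rws,o+rx' '/srv/app/releases'
    #   sudo chmod -R 'u+rw,g+rw,o+r' '/srv/app'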
243,033
|
frigg/frigg-worker
|
frigg_worker/jobs.py
|
Job.create_pending_after_task
|
def create_pending_after_task(self):
"""
Creates pending task results in a dict on self.after_results with task string as key.
It will also create a list on self.after_tasks that is used to make sure the serialization
of the results creates a correctly ordered list.
"""
for task in self.settings.tasks[self.after_tasks_key]:
self.after_tasks.append(task)
self.after_results[task] = Result(task)
|
python
|
def create_pending_after_task(self):
"""
Creates pending task results in a dict on self.after_results with task string as key.
It will also create a list on self.after_tasks that is used to make sure the serialization
of the results creates a correctly ordered list.
"""
for task in self.settings.tasks[self.after_tasks_key]:
self.after_tasks.append(task)
self.after_results[task] = Result(task)
|
[
"def",
"create_pending_after_task",
"(",
"self",
")",
":",
"for",
"task",
"in",
"self",
".",
"settings",
".",
"tasks",
"[",
"self",
".",
"after_tasks_key",
"]",
":",
"self",
".",
"after_tasks",
".",
"append",
"(",
"task",
")",
"self",
".",
"after_results",
"[",
"task",
"]",
"=",
"Result",
"(",
"task",
")"
] |
Creates pending task results in a dict on self.after_results with task string as key.
It will also create a list on self.after_tasks that is used to make sure the serialization
of the results creates a correctly ordered list.
|
[
"Creates",
"pending",
"task",
"results",
"in",
"a",
"dict",
"on",
"self",
".",
"after_result",
"with",
"task",
"string",
"as",
"key",
".",
"It",
"will",
"also",
"create",
"a",
"list",
"on",
"self",
".",
"tasks",
"that",
"is",
"used",
"to",
"make",
"sure",
"the",
"serialization",
"of",
"the",
"results",
"creates",
"a",
"correctly",
"ordered",
"list",
"."
] |
8c215cd8f5a27ff9f5a4fedafe93d2ef0fbca86c
|
https://github.com/frigg/frigg-worker/blob/8c215cd8f5a27ff9f5a4fedafe93d2ef0fbca86c/frigg_worker/jobs.py#L231-L239
|
243,034
|
privacee/freelan-configurator
|
freelan_configurator/freelan_cmd.py
|
load_config
|
def load_config():
'''try loading config file from a default directory'''
cfg_path = '/usr/local/etc/freelan'
cfg_file = 'freelan.cfg'
if not os.path.isdir(cfg_path):
print("Can not find default freelan config directory.")
return
cfg_file_path = os.path.join(cfg_path,cfg_file)
if not os.path.isfile( cfg_file_path ):
print("Can not find default freelan config file.")
return
return _load_config(cfg_file_path)
|
python
|
def load_config():
'''try loading config file from a default directory'''
cfg_path = '/usr/local/etc/freelan'
cfg_file = 'freelan.cfg'
if not os.path.isdir(cfg_path):
print("Can not find default freelan config directory.")
return
cfg_file_path = os.path.join(cfg_path,cfg_file)
if not os.path.isfile( cfg_file_path ):
print("Can not find default freelan config file.")
return
return _load_config(cfg_file_path)
|
[
"def",
"load_config",
"(",
")",
":",
"cfg_path",
"=",
"'/usr/local/etc/freelan'",
"cfg_file",
"=",
"'freelan.cfg'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cfg_path",
")",
":",
"print",
"(",
"\"Can not find default freelan config directory.\"",
")",
"return",
"cfg_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cfg_path",
",",
"cfg_file",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"cfg_file_path",
")",
":",
"print",
"(",
"\"Can not find default freelan config file.\"",
")",
"return",
"return",
"_load_config",
"(",
"cfg_file_path",
")"
] |
try loading config file from a default directory
|
[
"try",
"loading",
"config",
"file",
"from",
"a",
"default",
"directory"
] |
7c070f8958454792f870ef0d195a7f5da36edb5a
|
https://github.com/privacee/freelan-configurator/blob/7c070f8958454792f870ef0d195a7f5da36edb5a/freelan_configurator/freelan_cmd.py#L14-L29
|
243,035
|
privacee/freelan-configurator
|
freelan_configurator/freelan_cmd.py
|
write_config
|
def write_config(cfg):
'''try writing config file to a default directory'''
cfg_path = '/usr/local/etc/freelan'
cfg_file = 'freelan_TEST.cfg'
cfg_lines = []
if not isinstance(cfg, FreelanCFG):
if not isinstance(cfg, (list, tuple)):
print("Freelan write input can not be processed.")
return
cfg_lines = cfg
else:
cfg_lines = cfg.build()
if not os.path.isdir(cfg_path):
print("Can not find default freelan config directory.")
return
cfg_file_path = os.path.join(cfg_path,cfg_file)
if os.path.isfile( cfg_file_path ):
print("freelan config file already exists - moving to not replace content.")
ts = time.time()
backup_file = cfg_file_path+'.ORG-'+datetime.datetime.fromtimestamp(ts).strftime('%y-%m-%d-%H-%M-%S')
shutil.move(cfg_file_path, backup_file)
cfg_lines = [cfg_line+'\n' for cfg_line in cfg_lines]
with open(cfg_file_path, 'w') as cfg_f:
cfg_f.writelines(cfg_lines)
|
python
|
def write_config(cfg):
'''try writing config file to a default directory'''
cfg_path = '/usr/local/etc/freelan'
cfg_file = 'freelan_TEST.cfg'
cfg_lines = []
if not isinstance(cfg, FreelanCFG):
if not isinstance(cfg, (list, tuple)):
print("Freelan write input can not be processed.")
return
cfg_lines = cfg
else:
cfg_lines = cfg.build()
if not os.path.isdir(cfg_path):
print("Can not find default freelan config directory.")
return
cfg_file_path = os.path.join(cfg_path,cfg_file)
if os.path.isfile( cfg_file_path ):
print("freelan config file already exists - moving to not replace content.")
ts = time.time()
backup_file = cfg_file_path+'.ORG-'+datetime.datetime.fromtimestamp(ts).strftime('%y-%m-%d-%H-%M-%S')
shutil.move(cfg_file_path, backup_file)
cfg_lines = [cfg_line+'\n' for cfg_line in cfg_lines]
with open(cfg_file_path, 'w') as cfg_f:
cfg_f.writelines(cfg_lines)
|
[
"def",
"write_config",
"(",
"cfg",
")",
":",
"cfg_path",
"=",
"'/usr/local/etc/freelan'",
"cfg_file",
"=",
"'freelan_TEST.cfg'",
"cfg_lines",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"cfg",
",",
"FreelanCFG",
")",
":",
"if",
"not",
"isinstance",
"(",
"cfg",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"print",
"(",
"\"Freelan write input can not be processed.\"",
")",
"return",
"cfg_lines",
"=",
"cfg",
"else",
":",
"cfg_lines",
"=",
"cfg",
".",
"build",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cfg_path",
")",
":",
"print",
"(",
"\"Can not find default freelan config directory.\"",
")",
"return",
"cfg_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cfg_path",
",",
"cfg_file",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cfg_file_path",
")",
":",
"print",
"(",
"\"freelan config file already exists - moving to not replace content.\"",
")",
"ts",
"=",
"time",
".",
"time",
"(",
")",
"backup_file",
"=",
"cfg_file_path",
"+",
"'.ORG-'",
"+",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"ts",
")",
".",
"strftime",
"(",
"'%y-%m-%d-%H-%M-%S'",
")",
"shutil",
".",
"move",
"(",
"cfg_file_path",
",",
"backup_file",
")",
"cfg_lines",
"=",
"[",
"cfg_line",
"+",
"'\\n'",
"for",
"cfg_line",
"in",
"cfg_lines",
"]",
"with",
"open",
"(",
"cfg_file_path",
",",
"'w'",
")",
"as",
"cfg_f",
":",
"cfg_f",
".",
"writelines",
"(",
"cfg_lines",
")"
] |
try writing config file to a default directory
|
[
"try",
"writing",
"config",
"file",
"to",
"a",
"default",
"directory"
] |
7c070f8958454792f870ef0d195a7f5da36edb5a
|
https://github.com/privacee/freelan-configurator/blob/7c070f8958454792f870ef0d195a7f5da36edb5a/freelan_configurator/freelan_cmd.py#L76-L106
|
243,036
|
pyvec/pyvodb
|
pyvodb/cli/videometadata.py
|
cfgdump
|
def cfgdump(path, config):
"""Create output directory path and output there the config.yaml file."""
dump = yaml_dump(config)
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, 'config.yaml'), 'w') as outf:
outf.write(dump)
print(dump)
|
python
|
def cfgdump(path, config):
"""Create output directory path and output there the config.yaml file."""
dump = yaml_dump(config)
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, 'config.yaml'), 'w') as outf:
outf.write(dump)
print(dump)
|
[
"def",
"cfgdump",
"(",
"path",
",",
"config",
")",
":",
"dump",
"=",
"yaml_dump",
"(",
"config",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'config.yaml'",
")",
",",
"'w'",
")",
"as",
"outf",
":",
"outf",
".",
"write",
"(",
"dump",
")",
"print",
"(",
"dump",
")"
] |
Create output directory path and output the config.yaml file there.
|
[
"Create",
"output",
"directory",
"path",
"and",
"output",
"there",
"the",
"config",
".",
"yaml",
"file",
"."
] |
07183333df26eb12c5c2b98802cde3fb3a6c1339
|
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/cli/videometadata.py#L11-L18
|
243,037
|
pyvec/pyvodb
|
pyvodb/cli/videometadata.py
|
videometadata
|
def videometadata(ctx, city, date, outpath):
"""Generate metadata for video records.
city: The meetup series.
\b
date: The date. May be:
- YYYY-MM-DD or YY-MM-DD (e.g. 2015-08-27)
- YYYY-MM or YY-MM (e.g. 2015-08)
- MM (e.g. 08): the given month in the current year
- pN (e.g. p1): show the N-th last meetup
"""
db = ctx.obj['db']
today = ctx.obj['now'].date()
event = cliutil.get_event(db, city, date, today)
data = event.as_dict()
cliutil.handle_raw_output(ctx, data)
evdir = "{}-{}".format(event.city.name, event.slug)
config = OrderedDict()
config['speaker'] = ''
config['title'] = ''
config['lightning'] = True
config['speaker_only'] = False
config['widescreen'] = False
config['speaker_vid'] = "*.MTS"
config['screen_vid'] = "*.ts"
config['event'] = event.name
if event.number:
config['event'] += " #{}".format(event.number)
config['date'] = event.date.strftime("%Y-%m-%d")
config['url'] = "https://pyvo.cz/{}/{}/".format(event.series_slug,
event.slug)
print(evdir)
cfgdump(os.path.join(outpath, evdir), config)
if event.talks:
for talknum, talk in enumerate(event.talks, start=1):
config['speaker'] = ', '.join(s.name for s in talk.speakers)
config['title'] = talk.title
config['lightning'] = talk.is_lightning
talkdir = "{:02d}-{}".format(talknum, slugify(talk.title))
print(talkdir)
cfgdump(os.path.join(outpath, evdir, talkdir), config)
|
python
|
def videometadata(ctx, city, date, outpath):
"""Generate metadata for video records.
city: The meetup series.
\b
date: The date. May be:
- YYYY-MM-DD or YY-MM-DD (e.g. 2015-08-27)
- YYYY-MM or YY-MM (e.g. 2015-08)
- MM (e.g. 08): the given month in the current year
- pN (e.g. p1): show the N-th last meetup
"""
db = ctx.obj['db']
today = ctx.obj['now'].date()
event = cliutil.get_event(db, city, date, today)
data = event.as_dict()
cliutil.handle_raw_output(ctx, data)
evdir = "{}-{}".format(event.city.name, event.slug)
config = OrderedDict()
config['speaker'] = ''
config['title'] = ''
config['lightning'] = True
config['speaker_only'] = False
config['widescreen'] = False
config['speaker_vid'] = "*.MTS"
config['screen_vid'] = "*.ts"
config['event'] = event.name
if event.number:
config['event'] += " #{}".format(event.number)
config['date'] = event.date.strftime("%Y-%m-%d")
config['url'] = "https://pyvo.cz/{}/{}/".format(event.series_slug,
event.slug)
print(evdir)
cfgdump(os.path.join(outpath, evdir), config)
if event.talks:
for talknum, talk in enumerate(event.talks, start=1):
config['speaker'] = ', '.join(s.name for s in talk.speakers)
config['title'] = talk.title
config['lightning'] = talk.is_lightning
talkdir = "{:02d}-{}".format(talknum, slugify(talk.title))
print(talkdir)
cfgdump(os.path.join(outpath, evdir, talkdir), config)
|
[
"def",
"videometadata",
"(",
"ctx",
",",
"city",
",",
"date",
",",
"outpath",
")",
":",
"db",
"=",
"ctx",
".",
"obj",
"[",
"'db'",
"]",
"today",
"=",
"ctx",
".",
"obj",
"[",
"'now'",
"]",
".",
"date",
"(",
")",
"event",
"=",
"cliutil",
".",
"get_event",
"(",
"db",
",",
"city",
",",
"date",
",",
"today",
")",
"data",
"=",
"event",
".",
"as_dict",
"(",
")",
"cliutil",
".",
"handle_raw_output",
"(",
"ctx",
",",
"data",
")",
"evdir",
"=",
"\"{}-{}\"",
".",
"format",
"(",
"event",
".",
"city",
".",
"name",
",",
"event",
".",
"slug",
")",
"config",
"=",
"OrderedDict",
"(",
")",
"config",
"[",
"'speaker'",
"]",
"=",
"''",
"config",
"[",
"'title'",
"]",
"=",
"''",
"config",
"[",
"'lightning'",
"]",
"=",
"True",
"config",
"[",
"'speaker_only'",
"]",
"=",
"False",
"config",
"[",
"'widescreen'",
"]",
"=",
"False",
"config",
"[",
"'speaker_vid'",
"]",
"=",
"\"*.MTS\"",
"config",
"[",
"'screen_vid'",
"]",
"=",
"\"*.ts\"",
"config",
"[",
"'event'",
"]",
"=",
"event",
".",
"name",
"if",
"event",
".",
"number",
":",
"config",
"[",
"'event'",
"]",
"+=",
"\" #{}\"",
".",
"format",
"(",
"event",
".",
"number",
")",
"config",
"[",
"'date'",
"]",
"=",
"event",
".",
"date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"config",
"[",
"'url'",
"]",
"=",
"\"https://pyvo.cz/{}/{}/\"",
".",
"format",
"(",
"event",
".",
"series_slug",
",",
"event",
".",
"slug",
")",
"print",
"(",
"evdir",
")",
"cfgdump",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outpath",
",",
"evdir",
")",
",",
"config",
")",
"if",
"event",
".",
"talks",
":",
"for",
"talknum",
",",
"talk",
"in",
"enumerate",
"(",
"event",
".",
"talks",
",",
"start",
"=",
"1",
")",
":",
"config",
"[",
"'speaker'",
"]",
"=",
"', '",
".",
"join",
"(",
"s",
".",
"name",
"for",
"s",
"in",
"talk",
".",
"speakers",
")",
"config",
"[",
"'title'",
"]",
"=",
"talk",
".",
"title",
"config",
"[",
"'lightning'",
"]",
"=",
"talk",
".",
"is_lightning",
"talkdir",
"=",
"\"{:02d}-{}\"",
".",
"format",
"(",
"talknum",
",",
"slugify",
"(",
"talk",
".",
"title",
")",
")",
"print",
"(",
"talkdir",
")",
"cfgdump",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outpath",
",",
"evdir",
",",
"talkdir",
")",
",",
"config",
")"
] |
Generate metadata for video records.
city: The meetup series.
\b
date: The date. May be:
- YYYY-MM-DD or YY-MM-DD (e.g. 2015-08-27)
- YYYY-MM or YY-MM (e.g. 2015-08)
- MM (e.g. 08): the given month in the current year
- pN (e.g. p1): show the N-th last meetup
|
[
"Generate",
"metadata",
"for",
"video",
"records",
"."
] |
07183333df26eb12c5c2b98802cde3fb3a6c1339
|
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/cli/videometadata.py#L26-L73
|
243,038
|
anjos/rrbob
|
rr/database.py
|
split_data
|
def split_data(data, subset, splits):
'''Returns the data for a given protocol
'''
return dict([(k, data[k][splits[subset]]) for k in data])
|
python
|
def split_data(data, subset, splits):
'''Returns the data for a given protocol
'''
return dict([(k, data[k][splits[subset]]) for k in data])
|
[
"def",
"split_data",
"(",
"data",
",",
"subset",
",",
"splits",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"k",
",",
"data",
"[",
"k",
"]",
"[",
"splits",
"[",
"subset",
"]",
"]",
")",
"for",
"k",
"in",
"data",
"]",
")"
] |
Returns the data for a given protocol
|
[
"Returns",
"the",
"data",
"for",
"a",
"given",
"protocol"
] |
d32d35bab2aa2698d3caa923fd02afb6d67f3235
|
https://github.com/anjos/rrbob/blob/d32d35bab2aa2698d3caa923fd02afb6d67f3235/rr/database.py#L37-L41
|
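split_data simply re-indexes every per-class array with the same split selector, so the split is applied uniformly across classes. A self-contained toy sketch of that behaviour (the arrays and the slice-based splits are invented for illustration, not the real PROTOCOLS):

import numpy

def split_data(data, subset, splits):
    return dict([(k, data[k][splits[subset]]) for k in data])

data = {'setosa': numpy.arange(10).reshape(5, 2),
        'virginica': numpy.arange(10, 20).reshape(5, 2)}
splits = {'train': slice(0, 3), 'test': slice(3, 5)}  # hypothetical protocol

train = split_data(data, 'train', splits)
print(train['setosa'].shape)   # (3, 2): first three rows of each class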
243,039
|
anjos/rrbob
|
rr/database.py
|
get
|
def get(protocol, subset, classes=CLASSES, variables=VARIABLES):
'''Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
    variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature.
'''
retval = split_data(bob.db.iris.data(), subset, PROTOCOLS[protocol])
# filter variables (features)
varindex = [VARIABLES.index(k) for k in variables]
# filter class names and variable indexes at the same time
retval = dict([(k, retval[k][:,varindex]) for k in classes])
# squash the data
return numpy.array([retval[k] for k in classes])
|
python
|
def get(protocol, subset, classes=CLASSES, variables=VARIABLES):
'''Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
    variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature.
'''
retval = split_data(bob.db.iris.data(), subset, PROTOCOLS[protocol])
# filter variables (features)
varindex = [VARIABLES.index(k) for k in variables]
# filter class names and variable indexes at the same time
retval = dict([(k, retval[k][:,varindex]) for k in classes])
# squash the data
return numpy.array([retval[k] for k in classes])
|
[
"def",
"get",
"(",
"protocol",
",",
"subset",
",",
"classes",
"=",
"CLASSES",
",",
"variables",
"=",
"VARIABLES",
")",
":",
"retval",
"=",
"split_data",
"(",
"bob",
".",
"db",
".",
"iris",
".",
"data",
"(",
")",
",",
"subset",
",",
"PROTOCOLS",
"[",
"protocol",
"]",
")",
"# filter variables (features)",
"varindex",
"=",
"[",
"VARIABLES",
".",
"index",
"(",
"k",
")",
"for",
"k",
"in",
"variables",
"]",
"# filter class names and variable indexes at the same time",
"retval",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"retval",
"[",
"k",
"]",
"[",
":",
",",
"varindex",
"]",
")",
"for",
"k",
"in",
"classes",
"]",
")",
"# squash the data",
"return",
"numpy",
".",
"array",
"(",
"[",
"retval",
"[",
"k",
"]",
"for",
"k",
"in",
"classes",
"]",
")"
] |
Returns the data subset given a particular protocol
Parameters
protocol (string): one of the valid protocols supported by this interface
subset (string): one of 'train' or 'test'
classes (list of string): a list of strings containing the names of the
classes from which you want to have the data from
variables (list of strings): a list of strings containing the names of the
variables (features) you want to have data from
Returns:
data (numpy.ndarray): The data for all the classes and variables nicely
packed into one numpy 3D array. One depth represents the data for one
class, one row is one example, one column a given feature.
|
[
"Returns",
"the",
"data",
"subset",
"given",
"a",
"particular",
"protocol"
] |
d32d35bab2aa2698d3caa923fd02afb6d67f3235
|
https://github.com/anjos/rrbob/blob/d32d35bab2aa2698d3caa923fd02afb6d67f3235/rr/database.py#L44-L78
|
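The final numpy.array call in get stacks one 2-D block per class into a 3-D array of shape (n_classes, n_examples, n_variables), which is what the docstring means by one depth per class. A tiny illustration with invented blocks:

import numpy

blocks = {'setosa': numpy.zeros((25, 2)), 'versicolor': numpy.ones((25, 2))}
stacked = numpy.array([blocks[k] for k in ('setosa', 'versicolor')])
print(stacked.shape)   # (2, 25, 2): class x example x feature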
243,040
|
mayfield/shellish
|
shellish/completer.py
|
ActionCompleter.silent_parse_args
|
def silent_parse_args(self, command, args):
""" Silently attempt to parse args. If there is a failure then we
ignore the effects. Using an in-place namespace object ensures we
capture as many of the valid arguments as possible when the argparse
system would otherwise throw away the results. """
args_ns = argparse.Namespace()
stderr_save = argparse._sys.stderr
stdout_save = argparse._sys.stdout
argparse._sys.stderr = os.devnull
argparse._sys.stdout = os.devnull
try:
command.argparser.parse_known_args(args, args_ns)
except BaseException:
pass
finally:
argparse._sys.stderr = stderr_save
argparse._sys.stdout = stdout_save
return args_ns
|
python
|
def silent_parse_args(self, command, args):
""" Silently attempt to parse args. If there is a failure then we
ignore the effects. Using an in-place namespace object ensures we
capture as many of the valid arguments as possible when the argparse
system would otherwise throw away the results. """
args_ns = argparse.Namespace()
stderr_save = argparse._sys.stderr
stdout_save = argparse._sys.stdout
argparse._sys.stderr = os.devnull
argparse._sys.stdout = os.devnull
try:
command.argparser.parse_known_args(args, args_ns)
except BaseException:
pass
finally:
argparse._sys.stderr = stderr_save
argparse._sys.stdout = stdout_save
return args_ns
|
[
"def",
"silent_parse_args",
"(",
"self",
",",
"command",
",",
"args",
")",
":",
"args_ns",
"=",
"argparse",
".",
"Namespace",
"(",
")",
"stderr_save",
"=",
"argparse",
".",
"_sys",
".",
"stderr",
"stdout_save",
"=",
"argparse",
".",
"_sys",
".",
"stdout",
"argparse",
".",
"_sys",
".",
"stderr",
"=",
"os",
".",
"devnull",
"argparse",
".",
"_sys",
".",
"stdout",
"=",
"os",
".",
"devnull",
"try",
":",
"command",
".",
"argparser",
".",
"parse_known_args",
"(",
"args",
",",
"args_ns",
")",
"except",
"BaseException",
":",
"pass",
"finally",
":",
"argparse",
".",
"_sys",
".",
"stderr",
"=",
"stderr_save",
"argparse",
".",
"_sys",
".",
"stdout",
"=",
"stdout_save",
"return",
"args_ns"
] |
Silently attempt to parse args. If there is a failure then we
ignore the effects. Using an in-place namespace object ensures we
capture as many of the valid arguments as possible when the argparse
system would otherwise throw away the results.
|
[
"Silently",
"attempt",
"to",
"parse",
"args",
".",
"If",
"there",
"is",
"a",
"failure",
"then",
"we",
"ignore",
"the",
"effects",
".",
"Using",
"an",
"in",
"-",
"place",
"namespace",
"object",
"ensures",
"we",
"capture",
"as",
"many",
"of",
"the",
"valid",
"arguments",
"as",
"possible",
"when",
"the",
"argparse",
"system",
"would",
"otherwise",
"throw",
"away",
"the",
"results",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/completer.py#L53-L70
|
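Worth noting: os.devnull is a path string ('/dev/null' or 'nul'), not a file object, so any argparse write to it raises AttributeError, which the except BaseException clause then swallows; output is suppressed either way, just not by actual redirection. A sketch of a variant that redirects through real file objects (Python 3.5+ for contextlib.redirect_stderr; the command.argparser calling convention is assumed from the method above):

import argparse
import contextlib
import os

def silent_parse_args(command, args):
    # parse into an in-place namespace while discarding argparse's output
    args_ns = argparse.Namespace()
    with open(os.devnull, 'w') as devnull, \
            contextlib.redirect_stdout(devnull), \
            contextlib.redirect_stderr(devnull):
        try:
            command.argparser.parse_known_args(args, args_ns)
        except BaseException:
            pass
    return args_ns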
243,041
|
mayfield/shellish
|
shellish/completer.py
|
ActionCompleter.parse_nargs
|
def parse_nargs(self, nargs):
""" Nargs is essentially a multi-type encoding. We have to parse it
to understand how many values this action may consume. """
self.max_args = self.min_args = 0
if nargs is None:
self.max_args = self.min_args = 1
elif nargs == argparse.OPTIONAL:
self.max_args = 1
elif nargs == argparse.ZERO_OR_MORE:
self.max_args = None
elif nargs in (argparse.ONE_OR_MORE, argparse.REMAINDER):
self.min_args = 1
self.max_args = None
elif nargs != argparse.PARSER:
self.max_args = self.min_args = nargs
|
python
|
def parse_nargs(self, nargs):
""" Nargs is essentially a multi-type encoding. We have to parse it
to understand how many values this action may consume. """
self.max_args = self.min_args = 0
if nargs is None:
self.max_args = self.min_args = 1
elif nargs == argparse.OPTIONAL:
self.max_args = 1
elif nargs == argparse.ZERO_OR_MORE:
self.max_args = None
elif nargs in (argparse.ONE_OR_MORE, argparse.REMAINDER):
self.min_args = 1
self.max_args = None
elif nargs != argparse.PARSER:
self.max_args = self.min_args = nargs
|
[
"def",
"parse_nargs",
"(",
"self",
",",
"nargs",
")",
":",
"self",
".",
"max_args",
"=",
"self",
".",
"min_args",
"=",
"0",
"if",
"nargs",
"is",
"None",
":",
"self",
".",
"max_args",
"=",
"self",
".",
"min_args",
"=",
"1",
"elif",
"nargs",
"==",
"argparse",
".",
"OPTIONAL",
":",
"self",
".",
"max_args",
"=",
"1",
"elif",
"nargs",
"==",
"argparse",
".",
"ZERO_OR_MORE",
":",
"self",
".",
"max_args",
"=",
"None",
"elif",
"nargs",
"in",
"(",
"argparse",
".",
"ONE_OR_MORE",
",",
"argparse",
".",
"REMAINDER",
")",
":",
"self",
".",
"min_args",
"=",
"1",
"self",
".",
"max_args",
"=",
"None",
"elif",
"nargs",
"!=",
"argparse",
".",
"PARSER",
":",
"self",
".",
"max_args",
"=",
"self",
".",
"min_args",
"=",
"nargs"
] |
Nargs is essentially a multi-type encoding. We have to parse it
to understand how many values this action may consume.
|
[
"Nargs",
"is",
"essentially",
"a",
"multi",
"-",
"type",
"encoding",
".",
"We",
"have",
"to",
"parse",
"it",
"to",
"understand",
"how",
"many",
"values",
"this",
"action",
"may",
"consume",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/completer.py#L72-L86
|
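For reference, the nargs encodings handled above map to (min_args, max_args) like this, where None means unbounded; the names are the public argparse constants:

import argparse

# nargs value                     -> (min_args, max_args)
# None                            -> (1, 1)
# argparse.OPTIONAL     ('?')     -> (0, 1)
# argparse.ZERO_OR_MORE ('*')     -> (0, None)
# argparse.ONE_OR_MORE  ('+')     -> (1, None)
# argparse.REMAINDER    ('...')   -> (1, None)
# argparse.PARSER       ('A...')  -> (0, 0)   # left at the initial values
# any integer N                   -> (N, N)
print(argparse.OPTIONAL, argparse.ZERO_OR_MORE, argparse.ONE_OR_MORE)  # ? * +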
243,042
|
mayfield/shellish
|
shellish/completer.py
|
ActionCompleter.consume
|
def consume(self, args):
""" Consume the arguments we support. The args are modified inline.
The return value is the number of args eaten. """
consumable = args[:self.max_args]
self.consumed = len(consumable)
del args[:self.consumed]
return self.consumed
|
python
|
def consume(self, args):
""" Consume the arguments we support. The args are modified inline.
The return value is the number of args eaten. """
consumable = args[:self.max_args]
self.consumed = len(consumable)
del args[:self.consumed]
return self.consumed
|
[
"def",
"consume",
"(",
"self",
",",
"args",
")",
":",
"consumable",
"=",
"args",
"[",
":",
"self",
".",
"max_args",
"]",
"self",
".",
"consumed",
"=",
"len",
"(",
"consumable",
")",
"del",
"args",
"[",
":",
"self",
".",
"consumed",
"]",
"return",
"self",
".",
"consumed"
] |
Consume the arguments we support. The args are modified inline.
The return value is the number of args eaten.
|
[
"Consume",
"the",
"arguments",
"we",
"support",
".",
"The",
"args",
"are",
"modified",
"inline",
".",
"The",
"return",
"value",
"is",
"the",
"number",
"of",
"args",
"eaten",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/completer.py#L88-L94
|
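Two details make consume work: slicing past the end of a list is safe, and args[:None] is an open-ended slice, so max_args = None consumes everything; the del then shrinks the caller's list in place. A toy illustration:

args = ['a', 'b', 'c', 'd']
consumable = args[:2]          # bounded: ['a', 'b']
del args[:len(consumable)]
print(args)                    # ['c', 'd'] - the caller's list was mutated

args = ['a', 'b', 'c']
print(len(args[:None]))        # 3 - None slices to the end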
243,043
|
mayfield/shellish
|
shellish/completer.py
|
ActionCompleter.about_action
|
def about_action(self):
""" Simple string describing the action. """
name = self.action.metavar or self.action.dest
type_name = self.action.type.__name__ if self.action.type else ''
if self.action.help or type_name:
extra = ' (%s)' % (self.action.help or 'type: %s' % type_name)
else:
extra = ''
return name + extra
|
python
|
def about_action(self):
""" Simple string describing the action. """
name = self.action.metavar or self.action.dest
type_name = self.action.type.__name__ if self.action.type else ''
if self.action.help or type_name:
extra = ' (%s)' % (self.action.help or 'type: %s' % type_name)
else:
extra = ''
return name + extra
|
[
"def",
"about_action",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"action",
".",
"metavar",
"or",
"self",
".",
"action",
".",
"dest",
"type_name",
"=",
"self",
".",
"action",
".",
"type",
".",
"__name__",
"if",
"self",
".",
"action",
".",
"type",
"else",
"''",
"if",
"self",
".",
"action",
".",
"help",
"or",
"type_name",
":",
"extra",
"=",
"' (%s)'",
"%",
"(",
"self",
".",
"action",
".",
"help",
"or",
"'type: %s'",
"%",
"type_name",
")",
"else",
":",
"extra",
"=",
"''",
"return",
"name",
"+",
"extra"
] |
Simple string describing the action.
|
[
"Simple",
"string",
"describing",
"the",
"action",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/completer.py#L108-L116
|
243,044
|
mayfield/shellish
|
shellish/completer.py
|
ActionCompleter.file_complete
|
def file_complete(self, prefix, args):
""" Look in the local filesystem for valid file choices. """
path = os.path.expanduser(prefix)
dirname, name = os.path.split(path)
if not dirname:
dirname = '.'
try:
dirs = os.listdir(dirname)
except FileNotFoundError:
return frozenset()
choices = []
session = self.calling_command.session
for f in dirs:
try:
if (not name or f.startswith(name)) and \
not f.startswith('.'):
choices.append(f)
except PermissionError:
pass
prevent_pad = session.pad_completion and len(choices) == 1 and \
os.path.isdir(choices[0])
names = [os.path.join(dirname, x) for x in choices]
if prevent_pad:
names.append(names[0] + '/')
return frozenset(names)
|
python
|
def file_complete(self, prefix, args):
""" Look in the local filesystem for valid file choices. """
path = os.path.expanduser(prefix)
dirname, name = os.path.split(path)
if not dirname:
dirname = '.'
try:
dirs = os.listdir(dirname)
except FileNotFoundError:
return frozenset()
choices = []
session = self.calling_command.session
for f in dirs:
try:
if (not name or f.startswith(name)) and \
not f.startswith('.'):
choices.append(f)
except PermissionError:
pass
prevent_pad = session.pad_completion and len(choices) == 1 and \
os.path.isdir(choices[0])
names = [os.path.join(dirname, x) for x in choices]
if prevent_pad:
names.append(names[0] + '/')
return frozenset(names)
|
[
"def",
"file_complete",
"(",
"self",
",",
"prefix",
",",
"args",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"prefix",
")",
"dirname",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"if",
"not",
"dirname",
":",
"dirname",
"=",
"'.'",
"try",
":",
"dirs",
"=",
"os",
".",
"listdir",
"(",
"dirname",
")",
"except",
"FileNotFoundError",
":",
"return",
"frozenset",
"(",
")",
"choices",
"=",
"[",
"]",
"session",
"=",
"self",
".",
"calling_command",
".",
"session",
"for",
"f",
"in",
"dirs",
":",
"try",
":",
"if",
"(",
"not",
"name",
"or",
"f",
".",
"startswith",
"(",
"name",
")",
")",
"and",
"not",
"f",
".",
"startswith",
"(",
"'.'",
")",
":",
"choices",
".",
"append",
"(",
"f",
")",
"except",
"PermissionError",
":",
"pass",
"prevent_pad",
"=",
"session",
".",
"pad_completion",
"and",
"len",
"(",
"choices",
")",
"==",
"1",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"choices",
"[",
"0",
"]",
")",
"names",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"x",
")",
"for",
"x",
"in",
"choices",
"]",
"if",
"prevent_pad",
":",
"names",
".",
"append",
"(",
"names",
"[",
"0",
"]",
"+",
"'/'",
")",
"return",
"frozenset",
"(",
"names",
")"
] |
Look in the local filesystem for valid file choices.
|
[
"Look",
"in",
"the",
"local",
"filesystem",
"for",
"valid",
"file",
"choices",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/completer.py#L121-L145
|
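One subtlety in file_complete: os.path.isdir(choices[0]) tests the bare entry name, which resolves against the process working directory, so the single-match padding check is only reliable when dirname is effectively '.'; joining first makes it path-correct. A hedged sketch of just that test (the helper name is hypothetical):

import os

def _single_dir_match(dirname, choices, pad_completion=True):
    # path-correct variant of the prevent_pad test above
    return (pad_completion and len(choices) == 1
            and os.path.isdir(os.path.join(dirname, choices[0])))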
243,045
|
pytools/pytools-command
|
pytools_command/core.py
|
exec_command
|
def exec_command(command, **kwargs):
"""
Executes the given command and send the output to the console
:param str|list command:
:kwargs:
* `shell` (``bool`` = False) --
* `stdin` (``*`` = None) --
* `stdout` (``*`` = None) --
* `stderr` (``*`` = None) --
:return: CommandReturnValue
"""
shell = kwargs.get('shell', False)
stdin = kwargs.get('stdin', None)
stdout = kwargs.get('stdout', None)
stderr = kwargs.get('stderr', None)
kwargs.update(shell=shell)
kwargs.update(stdin=stdin)
kwargs.update(stdout=stdout)
kwargs.update(stderr=stderr)
if not isinstance(command, list):
command = shlex.split(command)
return_value = subprocess.call(command, **kwargs)
return CommandReturnValue(return_value=return_value,
stdin=stdin,
stdout=stdout,
stderr=stderr)
|
python
|
def exec_command(command, **kwargs):
"""
    Executes the given command and sends the output to the console
:param str|list command:
:kwargs:
* `shell` (``bool`` = False) --
* `stdin` (``*`` = None) --
* `stdout` (``*`` = None) --
* `stderr` (``*`` = None) --
:return: CommandReturnValue
"""
shell = kwargs.get('shell', False)
stdin = kwargs.get('stdin', None)
stdout = kwargs.get('stdout', None)
stderr = kwargs.get('stderr', None)
kwargs.update(shell=shell)
kwargs.update(stdin=stdin)
kwargs.update(stdout=stdout)
kwargs.update(stderr=stderr)
if not isinstance(command, list):
command = shlex.split(command)
return_value = subprocess.call(command, **kwargs)
return CommandReturnValue(return_value=return_value,
stdin=stdin,
stdout=stdout,
stderr=stderr)
|
[
"def",
"exec_command",
"(",
"command",
",",
"*",
"*",
"kwargs",
")",
":",
"shell",
"=",
"kwargs",
".",
"get",
"(",
"'shell'",
",",
"False",
")",
"stdin",
"=",
"kwargs",
".",
"get",
"(",
"'stdin'",
",",
"None",
")",
"stdout",
"=",
"kwargs",
".",
"get",
"(",
"'stdout'",
",",
"None",
")",
"stderr",
"=",
"kwargs",
".",
"get",
"(",
"'stderr'",
",",
"None",
")",
"kwargs",
".",
"update",
"(",
"shell",
"=",
"shell",
")",
"kwargs",
".",
"update",
"(",
"stdin",
"=",
"stdin",
")",
"kwargs",
".",
"update",
"(",
"stdout",
"=",
"stdout",
")",
"kwargs",
".",
"update",
"(",
"stderr",
"=",
"stderr",
")",
"if",
"not",
"isinstance",
"(",
"command",
",",
"list",
")",
":",
"command",
"=",
"shlex",
".",
"split",
"(",
"command",
")",
"return_value",
"=",
"subprocess",
".",
"call",
"(",
"command",
",",
"*",
"*",
"kwargs",
")",
"return",
"CommandReturnValue",
"(",
"return_value",
"=",
"return_value",
",",
"stdin",
"=",
"stdin",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
")"
] |
Executes the given command and sends the output to the console
:param str|list command:
:kwargs:
* `shell` (``bool`` = False) --
* `stdin` (``*`` = None) --
* `stdout` (``*`` = None) --
* `stderr` (``*`` = None) --
:return: CommandReturnValue
|
[
"Executes",
"the",
"given",
"command",
"and",
"send",
"the",
"output",
"to",
"the",
"console"
] |
ec6793d8ecac46fa0899f64596aa914f4a09d7f6
|
https://github.com/pytools/pytools-command/blob/ec6793d8ecac46fa0899f64596aa914f4a09d7f6/pytools_command/core.py#L40-L73
|
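A usage sketch for exec_command; the commands are hypothetical, and the attribute names on the returned object are assumed to mirror the CommandReturnValue constructor keywords:

from pytools_command.core import exec_command   # assumed import path

result = exec_command('ls -l')            # a string is shlex-split first
print(result.return_value)                # exit status from subprocess.call
result = exec_command(['echo', 'hi'])     # a list is passed through as-is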
243,046
|
pytools/pytools-command
|
pytools_command/core.py
|
observe_command
|
def observe_command(command, **kwargs):
"""
    Executes the given command and captures the output without printing to the console
:param str|list command:
:kwargs:
* `shell` (``bool`` = False) --
* `timeout` (``int`` = 15) -- Timeout in seconds
* `stdin` (``*`` = None) --
* `stdout` (``*`` = None) --
* `stderr` (``*`` = None) --
* `cwd` (``string`` = None) --
:return: CommandReturnValue
"""
shell = kwargs.get('shell', False)
timeout = kwargs.get('timeout', 15)
stdin = kwargs.get('stdin', subprocess.PIPE)
stdout = kwargs.get('stdout', subprocess.PIPE)
stderr = kwargs.get('stderr', subprocess.PIPE)
cwd = kwargs.get('cwd', None)
kwargs.update(shell=shell)
kwargs.update(stdin=stdin)
kwargs.update(stdout=stdout)
kwargs.update(stderr=stderr)
kwargs.update(cwd=cwd)
if not isinstance(command, list):
command = shlex.split(command)
# TODO: implement and process stdin - 1
proc = subprocess.Popen(command, **kwargs)
try:
# only Python versions from 3.3 have the 'timeout' argument
if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:
proc_stdout, proc_stderr = proc.communicate(timeout=timeout)
else:
proc_stdout, proc_stderr = proc.communicate()
except subprocess.TimeoutExpired:
proc.kill()
proc_stdout, proc_stderr = proc.communicate()
# TODO: implement and process stdin - 2
# process stdin
# try:
# _stdin = proc.stdin.read()
# except IOError:
# _stdin = None
#
# if not _stdin:
# _stdin = None
# process stdout
try:
_stdout = proc_stdout.decode('utf-8')
except IOError:
_stdout = None
if not _stdout:
_stdout = None
# process stderr
try:
_stderr = proc_stderr.decode('utf-8')
except IOError:
_stderr = None
if not _stderr:
_stderr = None
return CommandReturnValue(return_value=proc.returncode,
stdout=_stdout,
stderr=_stderr)
|
python
|
def observe_command(command, **kwargs):
"""
    Executes the given command and captures the output without printing to the console
:param str|list command:
:kwargs:
* `shell` (``bool`` = False) --
* `timeout` (``int`` = 15) -- Timeout in seconds
* `stdin` (``*`` = None) --
* `stdout` (``*`` = None) --
* `stderr` (``*`` = None) --
* `cwd` (``string`` = None) --
:return: CommandReturnValue
"""
shell = kwargs.get('shell', False)
timeout = kwargs.get('timeout', 15)
stdin = kwargs.get('stdin', subprocess.PIPE)
stdout = kwargs.get('stdout', subprocess.PIPE)
stderr = kwargs.get('stderr', subprocess.PIPE)
cwd = kwargs.get('cwd', None)
kwargs.update(shell=shell)
kwargs.update(stdin=stdin)
kwargs.update(stdout=stdout)
kwargs.update(stderr=stderr)
kwargs.update(cwd=cwd)
if not isinstance(command, list):
command = shlex.split(command)
# TODO: implement and process stdin - 1
proc = subprocess.Popen(command, **kwargs)
try:
# only Python versions from 3.3 have the 'timeout' argument
if sys.version_info[0] >= 3 and sys.version_info[1] >= 3:
proc_stdout, proc_stderr = proc.communicate(timeout=timeout)
else:
proc_stdout, proc_stderr = proc.communicate()
except subprocess.TimeoutExpired:
proc.kill()
proc_stdout, proc_stderr = proc.communicate()
# TODO: implement and process stdin - 2
# process stdin
# try:
# _stdin = proc.stdin.read()
# except IOError:
# _stdin = None
#
# if not _stdin:
# _stdin = None
# process stdout
try:
_stdout = proc_stdout.decode('utf-8')
except IOError:
_stdout = None
if not _stdout:
_stdout = None
# process stderr
try:
_stderr = proc_stderr.decode('utf-8')
except IOError:
_stderr = None
if not _stderr:
_stderr = None
return CommandReturnValue(return_value=proc.returncode,
stdout=_stdout,
stderr=_stderr)
|
[
"def",
"observe_command",
"(",
"command",
",",
"*",
"*",
"kwargs",
")",
":",
"shell",
"=",
"kwargs",
".",
"get",
"(",
"'shell'",
",",
"False",
")",
"timeout",
"=",
"kwargs",
".",
"get",
"(",
"'timeout'",
",",
"15",
")",
"stdin",
"=",
"kwargs",
".",
"get",
"(",
"'stdin'",
",",
"subprocess",
".",
"PIPE",
")",
"stdout",
"=",
"kwargs",
".",
"get",
"(",
"'stdout'",
",",
"subprocess",
".",
"PIPE",
")",
"stderr",
"=",
"kwargs",
".",
"get",
"(",
"'stderr'",
",",
"subprocess",
".",
"PIPE",
")",
"cwd",
"=",
"kwargs",
".",
"get",
"(",
"'cwd'",
",",
"None",
")",
"kwargs",
".",
"update",
"(",
"shell",
"=",
"shell",
")",
"kwargs",
".",
"update",
"(",
"stdin",
"=",
"stdin",
")",
"kwargs",
".",
"update",
"(",
"stdout",
"=",
"stdout",
")",
"kwargs",
".",
"update",
"(",
"stderr",
"=",
"stderr",
")",
"kwargs",
".",
"update",
"(",
"cwd",
"=",
"cwd",
")",
"if",
"not",
"isinstance",
"(",
"command",
",",
"list",
")",
":",
"command",
"=",
"shlex",
".",
"split",
"(",
"command",
")",
"# TODO: implement and process stdin - 1",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"# only Python versions from 3.3 have the 'timeout' argument",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
"and",
"sys",
".",
"version_info",
"[",
"1",
"]",
">=",
"3",
":",
"proc_stdout",
",",
"proc_stderr",
"=",
"proc",
".",
"communicate",
"(",
"timeout",
"=",
"timeout",
")",
"else",
":",
"proc_stdout",
",",
"proc_stderr",
"=",
"proc",
".",
"communicate",
"(",
")",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"proc",
".",
"kill",
"(",
")",
"proc_stdout",
",",
"proc_stderr",
"=",
"proc",
".",
"communicate",
"(",
")",
"# TODO: implement and process stdin - 2",
"# process stdin",
"# try:",
"# _stdin = proc.stdin.read()",
"# except IOError:",
"# _stdin = None",
"#",
"# if not _stdin:",
"# _stdin = None",
"# process stdout",
"try",
":",
"_stdout",
"=",
"proc_stdout",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"IOError",
":",
"_stdout",
"=",
"None",
"if",
"not",
"_stdout",
":",
"_stdout",
"=",
"None",
"# process stderr",
"try",
":",
"_stderr",
"=",
"proc_stderr",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"IOError",
":",
"_stderr",
"=",
"None",
"if",
"not",
"_stderr",
":",
"_stderr",
"=",
"None",
"return",
"CommandReturnValue",
"(",
"return_value",
"=",
"proc",
".",
"returncode",
",",
"stdout",
"=",
"_stdout",
",",
"stderr",
"=",
"_stderr",
")"
] |
Executes the given command and captures the output without printing to the console
:param str|list command:
:kwargs:
* `shell` (``bool`` = False) --
* `timeout` (``int`` = 15) -- Timeout in seconds
* `stdin` (``*`` = None) --
* `stdout` (``*`` = None) --
* `stderr` (``*`` = None) --
* `cwd` (``string`` = None) --
:return: CommandReturnValue
|
[
"Executes",
"the",
"given",
"command",
"and",
"captures",
"the",
"output",
"without",
"any",
"output",
"to",
"the",
"console"
] |
ec6793d8ecac46fa0899f64596aa914f4a09d7f6
|
https://github.com/pytools/pytools-command/blob/ec6793d8ecac46fa0899f64596aa914f4a09d7f6/pytools_command/core.py#L76-L154
|
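The version guard above compares major and minor independently, so e.g. a hypothetical Python 4.0 would fail the minor check and lose the timeout; comparing sys.version_info as a tuple is the robust idiom:

import sys

if sys.version_info >= (3, 3):
    # communicate() grew its 'timeout' argument in Python 3.3
    pass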
243,047
|
political-memory/django-representatives
|
representatives/migrations/0016_chamber_migrate_data.py
|
calculate_hash
|
def calculate_hash(obj):
"""
    Computes fingerprint for an object; this code is duplicated from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
"""
hashable_fields = {
'Chamber': ['name', 'country', 'abbreviation'],
'Constituency': ['name'],
'Group': ['name', 'abbreviation', 'kind', 'chamber'],
'Mandate': ['group', 'constituency', 'role', 'begin_date', 'end_date',
'representative']
}
fingerprint = hashlib.sha1()
for field_name in hashable_fields[obj.__class__.__name__]:
field = obj._meta.get_field(field_name)
if field.is_relation:
related = getattr(obj, field_name)
if related is None:
fingerprint.update(smart_str(related))
else:
fingerprint.update(related.fingerprint)
else:
fingerprint.update(smart_str(getattr(obj, field_name)))
obj.fingerprint = fingerprint.hexdigest()
return obj.fingerprint
|
python
|
def calculate_hash(obj):
"""
    Computes fingerprint for an object; this code is duplicated from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
"""
hashable_fields = {
'Chamber': ['name', 'country', 'abbreviation'],
'Constituency': ['name'],
'Group': ['name', 'abbreviation', 'kind', 'chamber'],
'Mandate': ['group', 'constituency', 'role', 'begin_date', 'end_date',
'representative']
}
fingerprint = hashlib.sha1()
for field_name in hashable_fields[obj.__class__.__name__]:
field = obj._meta.get_field(field_name)
if field.is_relation:
related = getattr(obj, field_name)
if related is None:
fingerprint.update(smart_str(related))
else:
fingerprint.update(related.fingerprint)
else:
fingerprint.update(smart_str(getattr(obj, field_name)))
obj.fingerprint = fingerprint.hexdigest()
return obj.fingerprint
|
[
"def",
"calculate_hash",
"(",
"obj",
")",
":",
"hashable_fields",
"=",
"{",
"'Chamber'",
":",
"[",
"'name'",
",",
"'country'",
",",
"'abbreviation'",
"]",
",",
"'Constituency'",
":",
"[",
"'name'",
"]",
",",
"'Group'",
":",
"[",
"'name'",
",",
"'abbreviation'",
",",
"'kind'",
",",
"'chamber'",
"]",
",",
"'Mandate'",
":",
"[",
"'group'",
",",
"'constituency'",
",",
"'role'",
",",
"'begin_date'",
",",
"'end_date'",
",",
"'representative'",
"]",
"}",
"fingerprint",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"for",
"field_name",
"in",
"hashable_fields",
"[",
"obj",
".",
"__class__",
".",
"__name__",
"]",
":",
"field",
"=",
"obj",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"if",
"field",
".",
"is_relation",
":",
"related",
"=",
"getattr",
"(",
"obj",
",",
"field_name",
")",
"if",
"related",
"is",
"None",
":",
"fingerprint",
".",
"update",
"(",
"smart_str",
"(",
"related",
")",
")",
"else",
":",
"fingerprint",
".",
"update",
"(",
"related",
".",
"fingerprint",
")",
"else",
":",
"fingerprint",
".",
"update",
"(",
"smart_str",
"(",
"getattr",
"(",
"obj",
",",
"field_name",
")",
")",
")",
"obj",
".",
"fingerprint",
"=",
"fingerprint",
".",
"hexdigest",
"(",
")",
"return",
"obj",
".",
"fingerprint"
] |
Computes fingerprint for an object; this code is duplicated from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
|
[
"Computes",
"fingerprint",
"for",
"an",
"object",
"this",
"code",
"is",
"duplicated",
"from",
"representatives",
".",
"models",
".",
"HashableModel",
"because",
"we",
"don",
"t",
"have",
"access",
"to",
"model",
"methods",
"in",
"a",
"migration",
"scenario",
"."
] |
811c90d0250149e913e6196f0ab11c97d396be39
|
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/migrations/0016_chamber_migrate_data.py#L9-L36
|
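Under Python 3, hashlib's update() only accepts bytes, while Django's smart_str returns str there, so each update argument would need an .encode('utf-8'); under Python 2, where this migration presumably ran, smart_str yields a byte string and the code works as written. Minimal demonstration of the bytes requirement (the sample string is invented):

import hashlib

fingerprint = hashlib.sha1()
fingerprint.update(u'Assemblée nationale'.encode('utf-8'))  # bytes on Python 3
print(fingerprint.hexdigest())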
243,048
|
political-memory/django-representatives
|
representatives/migrations/0016_chamber_migrate_data.py
|
get_or_create
|
def get_or_create(cls, **kwargs):
"""
Implements get_or_create logic for models that inherit from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
"""
try:
obj = cls.objects.get(**kwargs)
created = False
except cls.DoesNotExist:
obj = cls(**kwargs)
created = True
calculate_hash(obj)
obj.save()
return (obj, created)
|
python
|
def get_or_create(cls, **kwargs):
"""
Implements get_or_create logic for models that inherit from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
"""
try:
obj = cls.objects.get(**kwargs)
created = False
except cls.DoesNotExist:
obj = cls(**kwargs)
created = True
calculate_hash(obj)
obj.save()
return (obj, created)
|
[
"def",
"get_or_create",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"obj",
"=",
"cls",
".",
"objects",
".",
"get",
"(",
"*",
"*",
"kwargs",
")",
"created",
"=",
"False",
"except",
"cls",
".",
"DoesNotExist",
":",
"obj",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"created",
"=",
"True",
"calculate_hash",
"(",
"obj",
")",
"obj",
".",
"save",
"(",
")",
"return",
"(",
"obj",
",",
"created",
")"
] |
Implements get_or_create logic for models that inherit from
representatives.models.HashableModel because we don't have access to model
methods in a migration scenario.
|
[
"Implements",
"get_or_create",
"logic",
"for",
"models",
"that",
"inherit",
"from",
"representatives",
".",
"models",
".",
"HashableModel",
"because",
"we",
"don",
"t",
"have",
"access",
"to",
"model",
"methods",
"in",
"a",
"migration",
"scenario",
"."
] |
811c90d0250149e913e6196f0ab11c97d396be39
|
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/migrations/0016_chamber_migrate_data.py#L39-L55
|
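Note that calculate_hash and save() sit outside the try/except, so they run on both branches: even an already-existing row gets its fingerprint recomputed and is re-saved. A runnable control-flow skeleton of that shape (the Django calls are reduced to comments):

def demo(existing):
    # mirrors get_or_create above: branch, then always hash-and-save
    try:
        if not existing:
            raise KeyError
        obj, created = 'row', False
    except KeyError:
        obj, created = 'new row', True
    # calculate_hash(obj); obj.save()  <- always reached in the original
    return obj, created

print(demo(True))    # ('row', False)  - still re-hashed and re-saved
print(demo(False))   # ('new row', True)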
243,049
|
gongliyu/decoutils
|
decoutils/__init__.py
|
decorator_with_args
|
def decorator_with_args(func, return_original=False, target_pos=0):
"""Enable a function to work with a decorator with arguments
Args:
func (callable): The input function.
return_original (bool): Whether the resultant decorator returns
the decorating target unchanged. If True, will return the
target unchanged. Otherwise, return the returned value from
        *func*. Defaults to False. This is useful for converting a
non-decorator function to a decorator. See examples below.
Return:
callable: a decorator with arguments.
Examples:
>>> @decorator_with_args
... def register_plugin(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
... return plugin # note register_plugin is an ordinary decorator
>>> @register_plugin(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> @decorator_with_args(return_original=True)
... def register_plugin_xx(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
    ... # Note register_plugin_xx does not return plugin, so it cannot
... # be used as a decorator directly before applying
... # decorator_with_args.
>>> @register_plugin_xx(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> plugin1()
>>> @decorator_with_args(return_original=True)
... def register_plugin_xxx(plugin, arg1=1): pass
>>> # use result decorator as a function
>>> register_plugin_xxx(plugin=plugin1, arg1=10)
<function plugin1...>
>>> @decorator_with_args(return_original=True, target_pos=1)
... def register_plugin_xxxx(arg1, plugin, arg2=10):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
>>> @register_plugin_xxxx(100)
... def plugin2(): pass
Registering plugin2 with arg1=100
"""
if sys.version_info[0] >= 3:
target_name = inspect.getfullargspec(func).args[target_pos]
else:
target_name = inspect.getargspec(func).args[target_pos]
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(args) > target_pos:
res = func(*args, **kwargs)
return args[target_pos] if return_original else res
elif len(args) <= 0 and target_name in kwargs:
res = func(*args, **kwargs)
return kwargs[target_name] if return_original else res
else:
return wrap_with_args(*args, **kwargs)
def wrap_with_args(*args, **kwargs):
def wrapped_with_args(target):
kwargs2 = dict()
kwargs2[target_name] = target
kwargs2.update(kwargs)
res = func(*args, **kwargs2)
return target if return_original else res
return wrapped_with_args
return wrapper
|
python
|
def decorator_with_args(func, return_original=False, target_pos=0):
"""Enable a function to work with a decorator with arguments
Args:
func (callable): The input function.
return_original (bool): Whether the resultant decorator returns
the decorating target unchanged. If True, will return the
target unchanged. Otherwise, return the returned value from
        *func*. Defaults to False. This is useful for converting a
non-decorator function to a decorator. See examples below.
Return:
callable: a decorator with arguments.
Examples:
>>> @decorator_with_args
... def register_plugin(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
... return plugin # note register_plugin is an ordinary decorator
>>> @register_plugin(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> @decorator_with_args(return_original=True)
... def register_plugin_xx(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
    ... # Note register_plugin_xx does not return plugin, so it cannot
... # be used as a decorator directly before applying
... # decorator_with_args.
>>> @register_plugin_xx(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> plugin1()
>>> @decorator_with_args(return_original=True)
... def register_plugin_xxx(plugin, arg1=1): pass
>>> # use result decorator as a function
>>> register_plugin_xxx(plugin=plugin1, arg1=10)
<function plugin1...>
>>> @decorator_with_args(return_original=True, target_pos=1)
... def register_plugin_xxxx(arg1, plugin, arg2=10):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
>>> @register_plugin_xxxx(100)
... def plugin2(): pass
Registering plugin2 with arg1=100
"""
if sys.version_info[0] >= 3:
target_name = inspect.getfullargspec(func).args[target_pos]
else:
target_name = inspect.getargspec(func).args[target_pos]
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(args) > target_pos:
res = func(*args, **kwargs)
return args[target_pos] if return_original else res
elif len(args) <= 0 and target_name in kwargs:
res = func(*args, **kwargs)
return kwargs[target_name] if return_original else res
else:
return wrap_with_args(*args, **kwargs)
def wrap_with_args(*args, **kwargs):
def wrapped_with_args(target):
kwargs2 = dict()
kwargs2[target_name] = target
kwargs2.update(kwargs)
res = func(*args, **kwargs2)
return target if return_original else res
return wrapped_with_args
return wrapper
|
[
"def",
"decorator_with_args",
"(",
"func",
",",
"return_original",
"=",
"False",
",",
"target_pos",
"=",
"0",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"target_name",
"=",
"inspect",
".",
"getfullargspec",
"(",
"func",
")",
".",
"args",
"[",
"target_pos",
"]",
"else",
":",
"target_name",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
".",
"args",
"[",
"target_pos",
"]",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"target_pos",
":",
"res",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"args",
"[",
"target_pos",
"]",
"if",
"return_original",
"else",
"res",
"elif",
"len",
"(",
"args",
")",
"<=",
"0",
"and",
"target_name",
"in",
"kwargs",
":",
"res",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"kwargs",
"[",
"target_name",
"]",
"if",
"return_original",
"else",
"res",
"else",
":",
"return",
"wrap_with_args",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"def",
"wrap_with_args",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapped_with_args",
"(",
"target",
")",
":",
"kwargs2",
"=",
"dict",
"(",
")",
"kwargs2",
"[",
"target_name",
"]",
"=",
"target",
"kwargs2",
".",
"update",
"(",
"kwargs",
")",
"res",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs2",
")",
"return",
"target",
"if",
"return_original",
"else",
"res",
"return",
"wrapped_with_args",
"return",
"wrapper"
] |
Enable a function to work with a decorator with arguments
Args:
func (callable): The input function.
return_original (bool): Whether the resultant decorator returns
the decorating target unchanged. If True, will return the
target unchanged. Otherwise, return the returned value from
    *func*. Defaults to False. This is useful for converting a
non-decorator function to a decorator. See examples below.
Return:
callable: a decorator with arguments.
Examples:
>>> @decorator_with_args
... def register_plugin(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
... return plugin # note register_plugin is an ordinary decorator
>>> @register_plugin(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> @decorator_with_args(return_original=True)
... def register_plugin_xx(plugin, arg1=1):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
... # Note register_plugin_xx does not return plugin, so it cannot
... # be used as a decorator directly before applying
... # decorator_with_args.
>>> @register_plugin_xx(arg1=10)
... def plugin1(): pass
Registering plugin1 with arg1=10
>>> plugin1()
>>> @decorator_with_args(return_original=True)
... def register_plugin_xxx(plugin, arg1=1): pass
>>> # use result decorator as a function
>>> register_plugin_xxx(plugin=plugin1, arg1=10)
<function plugin1...>
>>> @decorator_with_args(return_original=True, target_pos=1)
... def register_plugin_xxxx(arg1, plugin, arg2=10):
... print('Registering '+plugin.__name__+' with arg1='+str(arg1))
>>> @register_plugin_xxxx(100)
... def plugin2(): pass
Registering plugin2 with arg1=100
|
[
"Enable",
"a",
"function",
"to",
"work",
"with",
"a",
"decorator",
"with",
"arguments"
] |
f826cda612da7e6e0767bb0e1f0ac509f8316322
|
https://github.com/gongliyu/decoutils/blob/f826cda612da7e6e0767bb0e1f0ac509f8316322/decoutils/__init__.py#L9-L85
|
243,050
|
janbrohl/XMLCompare
|
xmlcompare.py
|
elements_equal
|
def elements_equal(first, *others):
"""
Check elements for equality
"""
f = first
lf = list(f)
for e in others:
le = list(e)
if (len(lf) != len(le)
or f.tag != e.tag
or f.text != e.text
or f.tail != e.tail
or f.attrib != e.attrib
or (not all(map(elements_equal, lf, le)))
):
return False
return True
|
python
|
def elements_equal(first, *others):
"""
Check elements for equality
"""
f = first
lf = list(f)
for e in others:
le = list(e)
if (len(lf) != len(le)
or f.tag != e.tag
or f.text != e.text
or f.tail != e.tail
or f.attrib != e.attrib
or (not all(map(elements_equal, lf, le)))
):
return False
return True
|
[
"def",
"elements_equal",
"(",
"first",
",",
"*",
"others",
")",
":",
"f",
"=",
"first",
"lf",
"=",
"list",
"(",
"f",
")",
"for",
"e",
"in",
"others",
":",
"le",
"=",
"list",
"(",
"e",
")",
"if",
"(",
"len",
"(",
"lf",
")",
"!=",
"len",
"(",
"le",
")",
"or",
"f",
".",
"tag",
"!=",
"e",
".",
"tag",
"or",
"f",
".",
"text",
"!=",
"e",
".",
"text",
"or",
"f",
".",
"tail",
"!=",
"e",
".",
"tail",
"or",
"f",
".",
"attrib",
"!=",
"e",
".",
"attrib",
"or",
"(",
"not",
"all",
"(",
"map",
"(",
"elements_equal",
",",
"lf",
",",
"le",
")",
")",
")",
")",
":",
"return",
"False",
"return",
"True"
] |
Check elements for equality
|
[
"Check",
"elements",
"for",
"equality"
] |
2592cc4a0976b7510bfa44a63822d7fb6c63b74f
|
https://github.com/janbrohl/XMLCompare/blob/2592cc4a0976b7510bfa44a63822d7fb6c63b74f/xmlcompare.py#L12-L28
|
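The map(elements_equal, lf, le) call recurses pairwise over the children, and the up-front length check matters because map stops at the shorter sequence. A quick usage sketch (elements_equal assumed imported from xmlcompare; documents invented):

import xml.etree.ElementTree as ET

a = ET.fromstring('<r><x a="1">t</x></r>')
b = ET.fromstring('<r><x a="1">t</x></r>')
c = ET.fromstring('<r><x a="2">t</x></r>')
print(elements_equal(a, b))      # True
print(elements_equal(a, b, c))   # False - the child's attrib differs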
243,051
|
janbrohl/XMLCompare
|
xmlcompare.py
|
get_element
|
def get_element(text_or_tree_or_element):
"""
Get back an ET.Element for several possible input formats
"""
if isinstance(text_or_tree_or_element, ET.Element):
return text_or_tree_or_element
elif isinstance(text_or_tree_or_element, ET.ElementTree):
return text_or_tree_or_element.getroot()
elif isinstance(text_or_tree_or_element, (unicode, bytes)):
return ET.fromstring(text_or_tree_or_element)
else:
return ET.parse(text_or_tree_or_element).getroot()
|
python
|
def get_element(text_or_tree_or_element):
"""
Get back an ET.Element for several possible input formats
"""
if isinstance(text_or_tree_or_element, ET.Element):
return text_or_tree_or_element
elif isinstance(text_or_tree_or_element, ET.ElementTree):
return text_or_tree_or_element.getroot()
elif isinstance(text_or_tree_or_element, (unicode, bytes)):
return ET.fromstring(text_or_tree_or_element)
else:
return ET.parse(text_or_tree_or_element).getroot()
|
[
"def",
"get_element",
"(",
"text_or_tree_or_element",
")",
":",
"if",
"isinstance",
"(",
"text_or_tree_or_element",
",",
"ET",
".",
"Element",
")",
":",
"return",
"text_or_tree_or_element",
"elif",
"isinstance",
"(",
"text_or_tree_or_element",
",",
"ET",
".",
"ElementTree",
")",
":",
"return",
"text_or_tree_or_element",
".",
"getroot",
"(",
")",
"elif",
"isinstance",
"(",
"text_or_tree_or_element",
",",
"(",
"unicode",
",",
"bytes",
")",
")",
":",
"return",
"ET",
".",
"fromstring",
"(",
"text_or_tree_or_element",
")",
"else",
":",
"return",
"ET",
".",
"parse",
"(",
"text_or_tree_or_element",
")",
".",
"getroot",
"(",
")"
] |
Get back an ET.Element for several possible input formats
|
[
"Get",
"back",
"an",
"ET",
".",
"Element",
"for",
"several",
"possible",
"input",
"formats"
] |
2592cc4a0976b7510bfa44a63822d7fb6c63b74f
|
https://github.com/janbrohl/XMLCompare/blob/2592cc4a0976b7510bfa44a63822d7fb6c63b74f/xmlcompare.py#L31-L42
|
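The unicode name in get_element exists only on Python 2; on Python 3 that isinstance check raises NameError before ET.fromstring is reached. A version-neutral sketch using a string-types alias (the function name is hypothetical):

import sys
import xml.etree.ElementTree as ET

STRING_TYPES = (str, bytes) if sys.version_info[0] >= 3 else (unicode, bytes)  # noqa: F821

def get_element_portable(text_or_tree_or_element):
    # same dispatch as get_element above, with a 2/3-neutral string check
    if isinstance(text_or_tree_or_element, ET.Element):
        return text_or_tree_or_element
    elif isinstance(text_or_tree_or_element, ET.ElementTree):
        return text_or_tree_or_element.getroot()
    elif isinstance(text_or_tree_or_element, STRING_TYPES):
        return ET.fromstring(text_or_tree_or_element)
    else:
        return ET.parse(text_or_tree_or_element).getroot()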
243,052
|
deviantony/valigator
|
valigator/celery.py
|
task_failure_handler
|
def task_failure_handler(task_id=None, exception=None,
traceback=None, args=None, **kwargs):
"""Task failure handler"""
    # TODO: find a better way to access workdir/archive/image
task_report = {'task_id': task_id,
'exception': exception,
'traceback': traceback,
'archive': args[1]['archive_path'],
'image': args[1]['image']}
notifier.send_task_failure_report(task_report)
workdir = args[1]['workdir']
remove_file(workdir)
|
python
|
def task_failure_handler(task_id=None, exception=None,
traceback=None, args=None, **kwargs):
"""Task failure handler"""
    # TODO: find a better way to access workdir/archive/image
task_report = {'task_id': task_id,
'exception': exception,
'traceback': traceback,
'archive': args[1]['archive_path'],
'image': args[1]['image']}
notifier.send_task_failure_report(task_report)
workdir = args[1]['workdir']
remove_file(workdir)
|
[
"def",
"task_failure_handler",
"(",
"task_id",
"=",
"None",
",",
"exception",
"=",
"None",
",",
"traceback",
"=",
"None",
",",
"args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: find a better way to acces workdir/archive/image",
"task_report",
"=",
"{",
"'task_id'",
":",
"task_id",
",",
"'exception'",
":",
"exception",
",",
"'traceback'",
":",
"traceback",
",",
"'archive'",
":",
"args",
"[",
"1",
"]",
"[",
"'archive_path'",
"]",
",",
"'image'",
":",
"args",
"[",
"1",
"]",
"[",
"'image'",
"]",
"}",
"notifier",
".",
"send_task_failure_report",
"(",
"task_report",
")",
"workdir",
"=",
"args",
"[",
"1",
"]",
"[",
"'workdir'",
"]",
"remove_file",
"(",
"workdir",
")"
] |
Task failure handler
|
[
"Task",
"failure",
"handler"
] |
0557029bc58ea1270e358c14ca382d3807ed5b6f
|
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/celery.py#L29-L40
|
243,053
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._update_config
|
def _update_config(self,directory,filename):
"""Manages FLICKR config files"""
basefilename=os.path.splitext(filename)[0]
ext=os.path.splitext(filename)[1].lower()
if filename==LOCATION_FILE:
print("%s - Updating geotag information"%(LOCATION_FILE))
return self._update_config_location(directory)
elif filename==TAG_FILE:
print("%s - Updating tags"%(TAG_FILE))
return self._update_config_tags(directory)
elif filename==SET_FILE:
print("%s - Updating sets"%(SET_FILE))
return self._update_config_sets(directory)
elif filename==MEGAPIXEL_FILE:
print("%s - Updating photo size"%(MEGAPIXEL_FILE))
return self._upload_media(directory,resize_request=True)
elif ext in self.FLICKR_META_EXTENSIONS:
return self._update_meta(directory,basefilename)
return False
|
python
|
def _update_config(self,directory,filename):
"""Manages FLICKR config files"""
basefilename=os.path.splitext(filename)[0]
ext=os.path.splitext(filename)[1].lower()
if filename==LOCATION_FILE:
print("%s - Updating geotag information"%(LOCATION_FILE))
return self._update_config_location(directory)
elif filename==TAG_FILE:
print("%s - Updating tags"%(TAG_FILE))
return self._update_config_tags(directory)
elif filename==SET_FILE:
print("%s - Updating sets"%(SET_FILE))
return self._update_config_sets(directory)
elif filename==MEGAPIXEL_FILE:
print("%s - Updating photo size"%(MEGAPIXEL_FILE))
return self._upload_media(directory,resize_request=True)
elif ext in self.FLICKR_META_EXTENSIONS:
return self._update_meta(directory,basefilename)
return False
|
[
"def",
"_update_config",
"(",
"self",
",",
"directory",
",",
"filename",
")",
":",
"basefilename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"if",
"filename",
"==",
"LOCATION_FILE",
":",
"print",
"(",
"\"%s - Updating geotag information\"",
"%",
"(",
"LOCATION_FILE",
")",
")",
"return",
"self",
".",
"_update_config_location",
"(",
"directory",
")",
"elif",
"filename",
"==",
"TAG_FILE",
":",
"print",
"(",
"\"%s - Updating tags\"",
"%",
"(",
"TAG_FILE",
")",
")",
"return",
"self",
".",
"_update_config_tags",
"(",
"directory",
")",
"elif",
"filename",
"==",
"SET_FILE",
":",
"print",
"(",
"\"%s - Updating sets\"",
"%",
"(",
"SET_FILE",
")",
")",
"return",
"self",
".",
"_update_config_sets",
"(",
"directory",
")",
"elif",
"filename",
"==",
"MEGAPIXEL_FILE",
":",
"print",
"(",
"\"%s - Updating photo size\"",
"%",
"(",
"MEGAPIXEL_FILE",
")",
")",
"return",
"self",
".",
"_upload_media",
"(",
"directory",
",",
"resize_request",
"=",
"True",
")",
"elif",
"ext",
"in",
"self",
".",
"FLICKR_META_EXTENSIONS",
":",
"return",
"self",
".",
"_update_meta",
"(",
"directory",
",",
"basefilename",
")",
"return",
"False"
] |
Manages FLICKR config files
|
[
"Manages",
"FLICKR",
"config",
"files"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L122-L141
|
243,054
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._update_meta
|
def _update_meta(self,directory,filename):
"""Opens up filename.title and
filename.description, updates on flickr"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
db = self._loadDB(directory)
# Look up photo id for this photo
pid=db[filename]['photoid']
# =========== LOAD TITLE ========
fullfile=os.path.join(directory,filename+'.title')
try:
logger.debug('trying to open [%s]'%(fullfile))
_title=(open(fullfile).readline().strip())
logger.debug("_updatemeta: %s - title is %s",filename,_title)
except:
_title=''
# =========== LOAD DESCRIPTION ========
fullfile=os.path.join(directory,filename+'.description')
try:
_description=(open(fullfile).readline().strip())
logger.debug("_updatemeta: %s - description is %s",filename,_description)
except:
_description=''
logger.info('%s - updating metadata (title=%s) (description=%s)'\
%(filename,_title,_description))
resp=self.flickr.photos_setMeta(photo_id=pid,title=_title,\
description=_description)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_setTags failed with status: %s",\
resp.attrib['stat']);
return False
else:
return True
|
python
|
def _update_meta(self,directory,filename):
"""Opens up filename.title and
filename.description, updates on flickr"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
db = self._loadDB(directory)
# Look up photo id for this photo
pid=db[filename]['photoid']
# =========== LOAD TITLE ========
fullfile=os.path.join(directory,filename+'.title')
try:
logger.debug('trying to open [%s]'%(fullfile))
_title=(open(fullfile).readline().strip())
logger.debug("_updatemeta: %s - title is %s",filename,_title)
except:
_title=''
# =========== LOAD DESCRIPTION ========
fullfile=os.path.join(directory,filename+'.description')
try:
_description=(open(fullfile).readline().strip())
logger.debug("_updatemeta: %s - description is %s",filename,_description)
except:
_description=''
logger.info('%s - updating metadata (title=%s) (description=%s)'\
%(filename,_title,_description))
resp=self.flickr.photos_setMeta(photo_id=pid,title=_title,\
description=_description)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_setTags failed with status: %s",\
resp.attrib['stat']);
return False
else:
return True
|
[
"def",
"_update_meta",
"(",
"self",
",",
"directory",
",",
"filename",
")",
":",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"print",
"(",
"\"%s - Couldn't connect to flickr\"",
"%",
"(",
"directory",
")",
")",
"return",
"False",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"# Look up photo id for this photo",
"pid",
"=",
"db",
"[",
"filename",
"]",
"[",
"'photoid'",
"]",
"# =========== LOAD TITLE ========",
"fullfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
"+",
"'.title'",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"'trying to open [%s]'",
"%",
"(",
"fullfile",
")",
")",
"_title",
"=",
"(",
"open",
"(",
"fullfile",
")",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"\"_updatemeta: %s - title is %s\"",
",",
"filename",
",",
"_title",
")",
"except",
":",
"_title",
"=",
"''",
"# =========== LOAD DESCRIPTION ========",
"fullfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
"+",
"'.description'",
")",
"try",
":",
"_description",
"=",
"(",
"open",
"(",
"fullfile",
")",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"\"_updatemeta: %s - description is %s\"",
",",
"filename",
",",
"_description",
")",
"except",
":",
"_description",
"=",
"''",
"logger",
".",
"info",
"(",
"'%s - updating metadata (title=%s) (description=%s)'",
"%",
"(",
"filename",
",",
"_title",
",",
"_description",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_setMeta",
"(",
"photo_id",
"=",
"pid",
",",
"title",
"=",
"_title",
",",
"description",
"=",
"_description",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_setTags failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"else",
":",
"return",
"True"
] |
Opens up filename.title and
filename.description, updates on flickr
|
[
"Opens",
"up",
"filename",
".",
"title",
"and",
"filename",
".",
"description",
"updates",
"on",
"flickr"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L143-L182
|
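The record above loads optional `.title` and `.description` sidecar files, falling back to an empty string when a file is missing. A minimal sketch of that sidecar-loading pattern with explicit exception handling is below; the file layout comes from the record, everything else (function name, paths) is illustrative.

import os


def load_sidecar(directory, filename, suffix, default=''):
    """Return the first line of an optional sidecar file.

    Missing or unreadable files fall back to `default`, mirroring the
    try/except fallback used by _update_meta above.
    """
    fullfile = os.path.join(directory, filename + suffix)
    try:
        with open(fullfile) as handle:
            return handle.readline().strip()
    except (IOError, OSError):
        return default


# Usage (hypothetical paths):
# title = load_sidecar('/photos/2010', 'IMG_0001.jpg', '.title')
# description = load_sidecar('/photos/2010', 'IMG_0001.jpg', '.description')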
243,055
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._createphotoset
|
def _createphotoset(self,myset,primary_photoid):
"""Creates a photo set on Flickr"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
logger.debug('Creating photo set %s with prim photo %s'\
%(myset,primary_photoid))
resp=self.flickr.photosets_create(title=myset,\
primary_photo_id=primary_photoid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_setTags failed with status: %s",\
resp.attrib['stat']);
return False
else:
return True
|
python
|
def _createphotoset(self,myset,primary_photoid):
"""Creates a photo set on Flickr"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
logger.debug('Creating photo set %s with prim photo %s'\
%(myset,primary_photoid))
resp=self.flickr.photosets_create(title=myset,\
primary_photo_id=primary_photoid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_setTags failed with status: %s",\
resp.attrib['stat']);
return False
else:
return True
|
[
"def",
"_createphotoset",
"(",
"self",
",",
"myset",
",",
"primary_photoid",
")",
":",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"print",
"(",
"\"%s - Couldn't connect to flickr\"",
"%",
"(",
"directory",
")",
")",
"return",
"False",
"logger",
".",
"debug",
"(",
"'Creating photo set %s with prim photo %s'",
"%",
"(",
"myset",
",",
"primary_photoid",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photosets_create",
"(",
"title",
"=",
"myset",
",",
"primary_photo_id",
"=",
"primary_photoid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_setTags failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"else",
":",
"return",
"True"
] |
Creates a photo set on Flickr
|
[
"Creates",
"a",
"photo",
"set",
"on",
"Flickr"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L242-L257
|
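Every Flickr call in these records repeats the `resp.attrib['stat'] != 'ok'` check on the returned ElementTree element. A small helper, sketched here under the assumption that responses are ElementTree elements carrying a `stat` attribute (which the records' own checks imply), keeps that test in one place; the helper itself is not part of the source repo.

def response_ok(resp):
    """True when a Flickr REST response element reports stat='ok'."""
    return resp is not None and resp.attrib.get('stat') == 'ok'


# Typical use at a call site:
# resp = flickr.photosets_create(title=myset, primary_photo_id=primary_photoid)
# if not response_ok(resp):
#     return False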
243,056
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._update_config_sets
|
def _update_config_sets(self,directory,files=None):
"""
Loads set information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
# Load sets from SET_FILE
_sets=self._load_sets(directory)
        # Connect to flickr and get dictionary of photosets
psets=self._getphotosets()
db = self._loadDB(directory)
# To create a set, one needs to pass it the primary
# photo to use, let's open the DB and load the first
# photo
        primary_pid=db[list(db.keys())[0]]['photoid']
# Loop through all sets, create if it doesn't exist
for myset in _sets:
if myset not in psets:
logger.info('set [%s] not in flickr sets, will create set'%(myset))
self._createphotoset(myset,primary_pid)
        # Now reload photosets from flickr
psets=self._getphotosets()
# --- Load DB of photos, and update them all with new tags
for fn in db:
# --- If file list provided, skip files not in the list
if files and fn not in files:
continue
pid=db[fn]['photoid']
# Get all the photosets this photo belongs to
psets_for_photo=self._getphotosets_forphoto(pid)
for myset in _sets:
if myset in psets_for_photo:
logger.debug("%s - Already in photoset [%s] - skipping"%(fn,myset))
continue
logger.info("%s [flickr] Adding to set [%s]" %(fn,myset))
psid=psets[myset]['id']
logger.debug("%s - Adding to photoset %s"%(fn,psid))
resp=self.flickr.photosets_addPhoto(photoset_id=psid,photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_addPhoto failed with status: %s",\
resp.attrib['stat']);
return False
# Go through all sets flickr says this photo belongs to and
# remove from those sets if they don't appear in SET_FILE
for pset in psets_for_photo:
if pset not in _sets:
psid=psets[pset]['id']
logger.info("%s [flickr] Removing from set [%s]" %(fn,pset))
logger.debug("%s - Removing from photoset %s"%(fn,psid))
resp=self.flickr.photosets_removePhoto(photoset_id=psid,photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photossets_removePhoto failed with status: %s",\
resp.attrib['stat']);
return False
return True
|
python
|
def _update_config_sets(self,directory,files=None):
"""
Loads set information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
# Load sets from SET_FILE
_sets=self._load_sets(directory)
        # Connect to flickr and get dictionary of photosets
psets=self._getphotosets()
db = self._loadDB(directory)
# To create a set, one needs to pass it the primary
# photo to use, let's open the DB and load the first
# photo
        primary_pid=db[list(db.keys())[0]]['photoid']
# Loop through all sets, create if it doesn't exist
for myset in _sets:
if myset not in psets:
logger.info('set [%s] not in flickr sets, will create set'%(myset))
self._createphotoset(myset,primary_pid)
        # Now reload photosets from flickr
psets=self._getphotosets()
# --- Load DB of photos, and update them all with new tags
for fn in db:
# --- If file list provided, skip files not in the list
if files and fn not in files:
continue
pid=db[fn]['photoid']
# Get all the photosets this photo belongs to
psets_for_photo=self._getphotosets_forphoto(pid)
for myset in _sets:
if myset in psets_for_photo:
logger.debug("%s - Already in photoset [%s] - skipping"%(fn,myset))
continue
logger.info("%s [flickr] Adding to set [%s]" %(fn,myset))
psid=psets[myset]['id']
logger.debug("%s - Adding to photoset %s"%(fn,psid))
resp=self.flickr.photosets_addPhoto(photoset_id=psid,photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_addPhoto failed with status: %s",\
resp.attrib['stat']);
return False
# Go through all sets flickr says this photo belongs to and
# remove from those sets if they don't appear in SET_FILE
for pset in psets_for_photo:
if pset not in _sets:
psid=psets[pset]['id']
logger.info("%s [flickr] Removing from set [%s]" %(fn,pset))
logger.debug("%s - Removing from photoset %s"%(fn,psid))
resp=self.flickr.photosets_removePhoto(photoset_id=psid,photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photossets_removePhoto failed with status: %s",\
resp.attrib['stat']);
return False
return True
|
[
"def",
"_update_config_sets",
"(",
"self",
",",
"directory",
",",
"files",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"print",
"(",
"\"%s - Couldn't connect to flickr\"",
"%",
"(",
"directory",
")",
")",
"return",
"False",
"# Load sets from SET_FILE",
"_sets",
"=",
"self",
".",
"_load_sets",
"(",
"directory",
")",
"# Connect to flickr and get dicionary of photosets",
"psets",
"=",
"self",
".",
"_getphotosets",
"(",
")",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"# To create a set, one needs to pass it the primary",
"# photo to use, let's open the DB and load the first",
"# photo",
"primary_pid",
"=",
"db",
"[",
"db",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
"[",
"'photoid'",
"]",
"# Loop through all sets, create if it doesn't exist",
"for",
"myset",
"in",
"_sets",
":",
"if",
"myset",
"not",
"in",
"psets",
":",
"logger",
".",
"info",
"(",
"'set [%s] not in flickr sets, will create set'",
"%",
"(",
"myset",
")",
")",
"self",
".",
"_createphotoset",
"(",
"myset",
",",
"primary_pid",
")",
"# Now reaload photosets from flickr",
"psets",
"=",
"self",
".",
"_getphotosets",
"(",
")",
"# --- Load DB of photos, and update them all with new tags",
"for",
"fn",
"in",
"db",
":",
"# --- If file list provided, skip files not in the list",
"if",
"files",
"and",
"fn",
"not",
"in",
"files",
":",
"continue",
"pid",
"=",
"db",
"[",
"fn",
"]",
"[",
"'photoid'",
"]",
"# Get all the photosets this photo belongs to",
"psets_for_photo",
"=",
"self",
".",
"_getphotosets_forphoto",
"(",
"pid",
")",
"for",
"myset",
"in",
"_sets",
":",
"if",
"myset",
"in",
"psets_for_photo",
":",
"logger",
".",
"debug",
"(",
"\"%s - Already in photoset [%s] - skipping\"",
"%",
"(",
"fn",
",",
"myset",
")",
")",
"continue",
"logger",
".",
"info",
"(",
"\"%s [flickr] Adding to set [%s]\"",
"%",
"(",
"fn",
",",
"myset",
")",
")",
"psid",
"=",
"psets",
"[",
"myset",
"]",
"[",
"'id'",
"]",
"logger",
".",
"debug",
"(",
"\"%s - Adding to photoset %s\"",
"%",
"(",
"fn",
",",
"psid",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photosets_addPhoto",
"(",
"photoset_id",
"=",
"psid",
",",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_addPhoto failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"# Go through all sets flickr says this photo belongs to and",
"# remove from those sets if they don't appear in SET_FILE",
"for",
"pset",
"in",
"psets_for_photo",
":",
"if",
"pset",
"not",
"in",
"_sets",
":",
"psid",
"=",
"psets",
"[",
"pset",
"]",
"[",
"'id'",
"]",
"logger",
".",
"info",
"(",
"\"%s [flickr] Removing from set [%s]\"",
"%",
"(",
"fn",
",",
"pset",
")",
")",
"logger",
".",
"debug",
"(",
"\"%s - Removing from photoset %s\"",
"%",
"(",
"fn",
",",
"psid",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photosets_removePhoto",
"(",
"photoset_id",
"=",
"psid",
",",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photossets_removePhoto failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"return",
"True"
] |
Loads set information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
|
[
"Loads",
"set",
"information",
"from",
"file",
"and",
"updates",
"on",
"flickr",
"only",
"reads",
"first",
"line",
".",
"Format",
"is",
"comma",
"separated",
"eg",
".",
"travel",
"2010",
"South",
"Africa",
"Pretoria",
"If",
"files",
"is",
"None",
"will",
"update",
"all",
"files",
"in",
"DB",
"otherwise",
"will",
"only",
"update",
"files",
"that",
"are",
"in",
"the",
"flickr",
"DB",
"and",
"files",
"list"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L259-L330
|
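`_update_config_sets` uses two nested loops to decide which photosets a photo should be added to and which it should be removed from. The same reconciliation falls out of Python set arithmetic; the set names below are made up for illustration.

configured = {'travel', '2010', 'South Africa', 'Pretoria'}  # from SET_FILE
on_flickr = {'travel', 'misc'}                               # from photos_getAllContexts

to_add = configured - on_flickr     # {'2010', 'South Africa', 'Pretoria'}
to_remove = on_flickr - configured  # {'misc'}

for name in sorted(to_add):
    print('would add photo to set', name)
for name in sorted(to_remove):
    print('would remove photo from set', name)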
243,057
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._getphotosets_forphoto
|
def _getphotosets_forphoto(self,pid):
"""Asks flickr which photosets photo with
given pid belongs to, returns list of
photoset names"""
resp=self.flickr.photos_getAllContexts(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getAllContext failed with status: %s",\
resp.attrib['stat']);
return None
lphotosets=[]
for element in resp.findall('set'):
lphotosets.append(element.attrib['title'])
logger.debug('%s - belongs to these photosets %s',pid,lphotosets)
return lphotosets
|
python
|
def _getphotosets_forphoto(self,pid):
"""Asks flickr which photosets photo with
given pid belongs to, returns list of
photoset names"""
resp=self.flickr.photos_getAllContexts(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getAllContext failed with status: %s",\
resp.attrib['stat']);
return None
lphotosets=[]
for element in resp.findall('set'):
lphotosets.append(element.attrib['title'])
logger.debug('%s - belongs to these photosets %s',pid,lphotosets)
return lphotosets
|
[
"def",
"_getphotosets_forphoto",
"(",
"self",
",",
"pid",
")",
":",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_getAllContexts",
"(",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_getAllContext failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"None",
"lphotosets",
"=",
"[",
"]",
"for",
"element",
"in",
"resp",
".",
"findall",
"(",
"'set'",
")",
":",
"lphotosets",
".",
"append",
"(",
"element",
".",
"attrib",
"[",
"'title'",
"]",
")",
"logger",
".",
"debug",
"(",
"'%s - belongs to these photosets %s'",
",",
"pid",
",",
"lphotosets",
")",
"return",
"lphotosets"
] |
Asks flickr which photosets photo with
given pid belongs to, returns list of
photoset names
|
[
"Asks",
"flickr",
"which",
"photosets",
"photo",
"with",
"given",
"pid",
"belongs",
"to",
"returns",
"list",
"of",
"photoset",
"names"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L333-L350
|
243,058
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._getphoto_originalsize
|
def _getphoto_originalsize(self,pid):
"""Asks flickr for photo original size
returns tuple with width,height
"""
logger.debug('%s - Getting original size from flickr'%(pid))
width=None
height=None
resp=self.flickr.photos_getSizes(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getSizes failed with status: %s",\
resp.attrib['stat']);
return (None,None)
for size in resp.find('sizes').findall('size'):
if size.attrib['label']=="Original":
width=int(size.attrib['width'])
height=int(size.attrib['height'])
logger.debug('Found pid %s original size of %s,%s'\
%(pid,width,height))
return (width,height)
|
python
|
def _getphoto_originalsize(self,pid):
"""Asks flickr for photo original size
returns tuple with width,height
"""
logger.debug('%s - Getting original size from flickr'%(pid))
width=None
height=None
resp=self.flickr.photos_getSizes(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getSizes failed with status: %s",\
resp.attrib['stat']);
return (None,None)
for size in resp.find('sizes').findall('size'):
if size.attrib['label']=="Original":
width=int(size.attrib['width'])
height=int(size.attrib['height'])
logger.debug('Found pid %s original size of %s,%s'\
%(pid,width,height))
return (width,height)
|
[
"def",
"_getphoto_originalsize",
"(",
"self",
",",
"pid",
")",
":",
"logger",
".",
"debug",
"(",
"'%s - Getting original size from flickr'",
"%",
"(",
"pid",
")",
")",
"width",
"=",
"None",
"height",
"=",
"None",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_getSizes",
"(",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_getSizes failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"(",
"None",
",",
"None",
")",
"for",
"size",
"in",
"resp",
".",
"find",
"(",
"'sizes'",
")",
".",
"findall",
"(",
"'size'",
")",
":",
"if",
"size",
".",
"attrib",
"[",
"'label'",
"]",
"==",
"\"Original\"",
":",
"width",
"=",
"int",
"(",
"size",
".",
"attrib",
"[",
"'width'",
"]",
")",
"height",
"=",
"int",
"(",
"size",
".",
"attrib",
"[",
"'height'",
"]",
")",
"logger",
".",
"debug",
"(",
"'Found pid %s original size of %s,%s'",
"%",
"(",
"pid",
",",
"width",
",",
"height",
")",
")",
"return",
"(",
"width",
",",
"height",
")"
] |
Asks flickr for photo original size
returns tuple with width,height
|
[
"Asks",
"flickr",
"for",
"photo",
"original",
"size",
"returns",
"tuple",
"with",
"width",
"height"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L352-L375
|
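The sizes lookup above walks `resp.find('sizes').findall('size')` and picks the entry labeled "Original". That parsing logic can be exercised offline against a canned document shaped like a photos.getSizes reply; the sample XML here is illustrative, not a captured Flickr response.

import xml.etree.ElementTree as ET

SAMPLE = """
<rsp stat="ok">
  <sizes>
    <size label="Thumbnail" width="100" height="75"/>
    <size label="Original" width="4000" height="3000"/>
  </sizes>
</rsp>
"""

resp = ET.fromstring(SAMPLE)
width = height = None
for size in resp.find('sizes').findall('size'):
    if size.attrib['label'] == "Original":
        width = int(size.attrib['width'])
        height = int(size.attrib['height'])
print(width, height)  # 4000 3000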
243,059
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._getphoto_information
|
def _getphoto_information(self,pid):
"""Asks flickr for photo information
returns dictionary with attributes
{'dateuploaded': '1383410793',
'farm': '3',
'id': '10628709834',
'isfavorite': '0',
'license': '0',
'media': 'photo',
'originalformat': 'jpg',
'originalsecret': 'b60f4f675f',
'rotation': '0',
'safety_level': '0',
'secret': 'a4c96e996b',
'server': '2823',
'views': '1',
'title': 'Image title'
}
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
logger.debug('%s - Getting photo information from flickr'%(pid))
resp=self.flickr.photos_getInfo(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getInfo failed with status: %s",\
resp.attrib['stat']);
return None
p=resp.find('photo')
p.attrib['title']=p.find('title').text
return p.attrib
|
python
|
def _getphoto_information(self,pid):
"""Asks flickr for photo information
returns dictionary with attributes
{'dateuploaded': '1383410793',
'farm': '3',
'id': '10628709834',
'isfavorite': '0',
'license': '0',
'media': 'photo',
'originalformat': 'jpg',
'originalsecret': 'b60f4f675f',
'rotation': '0',
'safety_level': '0',
'secret': 'a4c96e996b',
'server': '2823',
'views': '1',
'title': 'Image title'
}
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
logger.debug('%s - Getting photo information from flickr'%(pid))
resp=self.flickr.photos_getInfo(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getInfo failed with status: %s",\
resp.attrib['stat']);
return None
p=resp.find('photo')
p.attrib['title']=p.find('title').text
return p.attrib
|
[
"def",
"_getphoto_information",
"(",
"self",
",",
"pid",
")",
":",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"print",
"(",
"\"%s - Couldn't connect to flickr\"",
"%",
"(",
"directory",
")",
")",
"return",
"False",
"d",
"=",
"{",
"}",
"logger",
".",
"debug",
"(",
"'%s - Getting photo information from flickr'",
"%",
"(",
"pid",
")",
")",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_getInfo",
"(",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_getInfo failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"None",
"p",
"=",
"resp",
".",
"find",
"(",
"'photo'",
")",
"p",
".",
"attrib",
"[",
"'title'",
"]",
"=",
"p",
".",
"find",
"(",
"'title'",
")",
".",
"text",
"return",
"p",
".",
"attrib"
] |
Asks flickr for photo information
returns dictionary with attributes
{'dateuploaded': '1383410793',
'farm': '3',
'id': '10628709834',
'isfavorite': '0',
'license': '0',
'media': 'photo',
'originalformat': 'jpg',
'originalsecret': 'b60f4f675f',
'rotation': '0',
'safety_level': '0',
'secret': 'a4c96e996b',
'server': '2823',
'views': '1',
'title': 'Image title'
}
|
[
"Asks",
"flickr",
"for",
"photo",
"information",
"returns",
"dictionary",
"with",
"attributes"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L429-L467
|
243,060
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._update_config_tags
|
def _update_config_tags(self,directory,files=None):
"""
Loads tags information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
logger.debug("Updating tags in %s"%(directory))
_tags=self._load_tags(directory)
# --- Load DB of photos, and update them all with new tags
db = self._loadDB(directory)
for fn in db:
# --- If file list provided, skip files not in the list
if files and fn not in files:
logger.debug('%s [flickr] Skipping, tag update',fn)
continue
logger.info("%s [flickr] Updating tags [%s]" %(fn,_tags))
pid=db[fn]['photoid']
resp=self.flickr.photos_setTags(photo_id=pid,tags=_tags)
            if resp.attrib['stat']!='ok':
                logger.error("%s - flickr: photos_setTags failed with status: %s",\
                        fn, resp.attrib['stat'])
                return False
        return True
|
python
|
def _update_config_tags(self,directory,files=None):
"""
Loads tags information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
"""
if not self._connectToFlickr():
print("%s - Couldn't connect to flickr"%(directory))
return False
logger.debug("Updating tags in %s"%(directory))
_tags=self._load_tags(directory)
# --- Load DB of photos, and update them all with new tags
db = self._loadDB(directory)
for fn in db:
# --- If file list provided, skip files not in the list
if files and fn not in files:
logger.debug('%s [flickr] Skipping, tag update',fn)
continue
logger.info("%s [flickr] Updating tags [%s]" %(fn,_tags))
pid=db[fn]['photoid']
resp=self.flickr.photos_setTags(photo_id=pid,tags=_tags)
            if resp.attrib['stat']!='ok':
                logger.error("%s - flickr: photos_setTags failed with status: %s",\
                        fn, resp.attrib['stat'])
                return False
        return True
|
[
"def",
"_update_config_tags",
"(",
"self",
",",
"directory",
",",
"files",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"print",
"(",
"\"%s - Couldn't connect to flickr\"",
"%",
"(",
"directory",
")",
")",
"return",
"False",
"logger",
".",
"debug",
"(",
"\"Updating tags in %s\"",
"%",
"(",
"directory",
")",
")",
"_tags",
"=",
"self",
".",
"_load_tags",
"(",
"directory",
")",
"# --- Load DB of photos, and update them all with new tags",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"for",
"fn",
"in",
"db",
":",
"# --- If file list provided, skip files not in the list",
"if",
"files",
"and",
"fn",
"not",
"in",
"files",
":",
"logger",
".",
"debug",
"(",
"'%s [flickr] Skipping, tag update'",
",",
"fn",
")",
"continue",
"logger",
".",
"info",
"(",
"\"%s [flickr] Updating tags [%s]\"",
"%",
"(",
"fn",
",",
"_tags",
")",
")",
"pid",
"=",
"db",
"[",
"fn",
"]",
"[",
"'photoid'",
"]",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_setTags",
"(",
"photo_id",
"=",
"pid",
",",
"tags",
"=",
"_tags",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_setTags failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"else",
":",
"return",
"True",
"return",
"False"
] |
Loads tags information from file and updates on flickr,
only reads first line. Format is comma separated eg.
travel, 2010, South Africa, Pretoria
If files is None, will update all files in DB, otherwise
will only update files that are in the flickr DB and files list
|
[
"Loads",
"tags",
"information",
"from",
"file",
"and",
"updates",
"on",
"flickr",
"only",
"reads",
"first",
"line",
".",
"Format",
"is",
"comma",
"separated",
"eg",
".",
"travel",
"2010",
"South",
"Africa",
"Pretoria",
"If",
"files",
"is",
"None",
"will",
"update",
"all",
"files",
"in",
"DB",
"otherwise",
"will",
"only",
"update",
"files",
"that",
"are",
"in",
"the",
"flickr",
"DB",
"and",
"files",
"list"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L469-L502
|
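The docstring says the tags file holds a single comma-separated line. `_load_tags` itself is outside this record, so the reader below is an assumption about its behavior, not a copy of it.

def load_first_line_csv(path):
    """Read only the first line of `path` and split it on commas.

    Hypothetical stand-in for _load_tags / _load_sets, which are not
    included in this record.
    """
    with open(path) as handle:
        return [item.strip() for item in handle.readline().split(',') if item.strip()]


# On a file whose first line is "travel, 2010, South Africa, Pretoria":
# load_first_line_csv('tags.txt')
# -> ['travel', '2010', 'South Africa', 'Pretoria']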
243,061
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._remove_media
|
def _remove_media(self,directory,files=None):
"""Removes specified files from flickr"""
# Connect if we aren't already
if not self._connectToFlickr():
logger.error("%s - Couldn't connect to flickr")
return False
db=self._loadDB(directory)
# If no files given, use files from DB in dir
if not files:
            files=list(db.keys())
#If only one file given, make it a list
if isinstance(files,basestring):
files=[files]
for fn in files:
print("%s - Deleting from flickr [local copy intact]"%(fn))
try:
pid=db[fn]['photoid']
            except KeyError:
logger.debug("%s - Was never in flickr DB"%(fn))
continue
resp=self.flickr.photos_delete(photo_id=pid,format='etree')
if resp.attrib['stat']!='ok':
print("%s - flickr: delete failed with status: %s",\
resp.attrib['stat']);
return False
else:
logger.debug('Removing %s from flickr DB'%(fn))
del db[fn]
self._saveDB(directory,db)
return True
|
python
|
def _remove_media(self,directory,files=None):
"""Removes specified files from flickr"""
# Connect if we aren't already
if not self._connectToFlickr():
logger.error("%s - Couldn't connect to flickr")
return False
db=self._loadDB(directory)
# If no files given, use files from DB in dir
if not files:
            files=list(db.keys())
#If only one file given, make it a list
if isinstance(files,basestring):
files=[files]
for fn in files:
print("%s - Deleting from flickr [local copy intact]"%(fn))
try:
pid=db[fn]['photoid']
            except KeyError:
logger.debug("%s - Was never in flickr DB"%(fn))
continue
resp=self.flickr.photos_delete(photo_id=pid,format='etree')
if resp.attrib['stat']!='ok':
print("%s - flickr: delete failed with status: %s",\
resp.attrib['stat']);
return False
else:
logger.debug('Removing %s from flickr DB'%(fn))
del db[fn]
self._saveDB(directory,db)
return True
|
[
"def",
"_remove_media",
"(",
"self",
",",
"directory",
",",
"files",
"=",
"None",
")",
":",
"# Connect if we aren't already",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"logger",
".",
"error",
"(",
"\"%s - Couldn't connect to flickr\"",
")",
"return",
"False",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"# If no files given, use files from DB in dir",
"if",
"not",
"files",
":",
"files",
"=",
"db",
".",
"keys",
"(",
")",
"#If only one file given, make it a list",
"if",
"isinstance",
"(",
"files",
",",
"basestring",
")",
":",
"files",
"=",
"[",
"files",
"]",
"for",
"fn",
"in",
"files",
":",
"print",
"(",
"\"%s - Deleting from flickr [local copy intact]\"",
"%",
"(",
"fn",
")",
")",
"try",
":",
"pid",
"=",
"db",
"[",
"fn",
"]",
"[",
"'photoid'",
"]",
"except",
":",
"logger",
".",
"debug",
"(",
"\"%s - Was never in flickr DB\"",
"%",
"(",
"fn",
")",
")",
"continue",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_delete",
"(",
"photo_id",
"=",
"pid",
",",
"format",
"=",
"'etree'",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"print",
"(",
"\"%s - flickr: delete failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"False",
"else",
":",
"logger",
".",
"debug",
"(",
"'Removing %s from flickr DB'",
"%",
"(",
"fn",
")",
")",
"del",
"db",
"[",
"fn",
"]",
"self",
".",
"_saveDB",
"(",
"directory",
",",
"db",
")",
"return",
"True"
] |
Removes specified files from flickr
|
[
"Removes",
"specified",
"files",
"from",
"flickr"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L574-L608
|
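`_remove_media` deletes entries from `db` while looping over `files`, which is only safe when `files` is an independent list rather than a live view of the dict (Python 3's `dict.keys()` returns a view). The hazard in isolation:

db = {'a.jpg': 1, 'b.jpg': 2, 'c.jpg': 3}

# for fn in db:           # RuntimeError on Python 3: dict changed size
#     del db[fn]

for fn in list(db.keys()):  # snapshot the keys first
    del db[fn]

print(db)  # {}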
243,062
|
esterhui/pypu
|
pypu/service_flickr.py
|
service_flickr._upload_media
|
def _upload_media(self,directory,files=None,resize_request=None):
"""Uploads media file to FLICKR, returns True if
uploaded successfully, Will replace
if already uploaded, If megapixels > 0, will
scale photos before upload
If no filename given, will go through all files in DB"""
# Connect if we aren't already
if not self._connectToFlickr():
logger.error("%s - Couldn't connect to flickr")
return False
_tags=self._load_tags(directory)
_megapixels=self._load_megapixels(directory)
# If no files given, use files from DB in dir
if not files:
db=self._loadDB(directory)
            files=list(db.keys())
#If only one file given, make it a list
if isinstance(files,basestring):
files=[files]
files.sort()
for filename in files:
#FIXME: If this fails, should send a list
# to Upload() about which files DID make it,
# so we don't have to upload it again!
status,replaced=self._upload_or_replace_flickr(directory,filename, \
_tags, _megapixels,resize_request)
if not status:
return False
# If uploaded OK, update photo properties, tags
# already taken care of - only update if
# this is a new photo (eg, if it was replaced
# then we don't need to do this
if not replaced:
self._update_config_location(directory,filename)
self._update_config_sets(directory,filename)
return True
|
python
|
def _upload_media(self,directory,files=None,resize_request=None):
"""Uploads media file to FLICKR, returns True if
uploaded successfully, Will replace
if already uploaded, If megapixels > 0, will
scale photos before upload
If no filename given, will go through all files in DB"""
# Connect if we aren't already
if not self._connectToFlickr():
logger.error("%s - Couldn't connect to flickr")
return False
_tags=self._load_tags(directory)
_megapixels=self._load_megapixels(directory)
# If no files given, use files from DB in dir
if not files:
db=self._loadDB(directory)
            files=list(db.keys())
#If only one file given, make it a list
if isinstance(files,basestring):
files=[files]
files.sort()
for filename in files:
#FIXME: If this fails, should send a list
# to Upload() about which files DID make it,
# so we don't have to upload it again!
status,replaced=self._upload_or_replace_flickr(directory,filename, \
_tags, _megapixels,resize_request)
if not status:
return False
# If uploaded OK, update photo properties, tags
# already taken care of - only update if
# this is a new photo (eg, if it was replaced
# then we don't need to do this
if not replaced:
self._update_config_location(directory,filename)
self._update_config_sets(directory,filename)
return True
|
[
"def",
"_upload_media",
"(",
"self",
",",
"directory",
",",
"files",
"=",
"None",
",",
"resize_request",
"=",
"None",
")",
":",
"# Connect if we aren't already",
"if",
"not",
"self",
".",
"_connectToFlickr",
"(",
")",
":",
"logger",
".",
"error",
"(",
"\"%s - Couldn't connect to flickr\"",
")",
"return",
"False",
"_tags",
"=",
"self",
".",
"_load_tags",
"(",
"directory",
")",
"_megapixels",
"=",
"self",
".",
"_load_megapixels",
"(",
"directory",
")",
"# If no files given, use files from DB in dir",
"if",
"not",
"files",
":",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"files",
"=",
"db",
".",
"keys",
"(",
")",
"#If only one file given, make it a list",
"if",
"isinstance",
"(",
"files",
",",
"basestring",
")",
":",
"files",
"=",
"[",
"files",
"]",
"files",
".",
"sort",
"(",
")",
"for",
"filename",
"in",
"files",
":",
"#FIXME: If this fails, should send a list",
"# to Upload() about which files DID make it,",
"# so we don't have to upload it again!",
"status",
",",
"replaced",
"=",
"self",
".",
"_upload_or_replace_flickr",
"(",
"directory",
",",
"filename",
",",
"_tags",
",",
"_megapixels",
",",
"resize_request",
")",
"if",
"not",
"status",
":",
"return",
"False",
"# If uploaded OK, update photo properties, tags",
"# already taken care of - only update if",
"# this is a new photo (eg, if it was replaced",
"# then we don't need to do this",
"if",
"not",
"replaced",
":",
"self",
".",
"_update_config_location",
"(",
"directory",
",",
"filename",
")",
"self",
".",
"_update_config_sets",
"(",
"directory",
",",
"filename",
")",
"return",
"True"
] |
Uploads media file to FLICKR, returns True if
uploaded successfully, Will replace
if already uploaded, If megapixels > 0, will
scale photos before upload
If no filename given, will go through all files in DB
|
[
"Uploads",
"media",
"file",
"to",
"FLICKR",
"returns",
"True",
"if",
"uploaded",
"successfully",
"Will",
"replace",
"if",
"already",
"uploaded",
"If",
"megapixels",
">",
"0",
"will",
"scale",
"photos",
"before",
"upload",
"If",
"no",
"filename",
"given",
"will",
"go",
"through",
"all",
"files",
"in",
"DB"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_flickr.py#L610-L653
|
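Several of these methods accept `files` as either a single name or a list and normalise it inline with a `basestring` check (Python 2). A version-agnostic sketch of the same normalisation; the helper name is ours, not the repo's.

def as_file_list(files):
    """Wrap a lone filename in a list; leave sequences alone."""
    if files is None:
        return []
    if isinstance(files, str):  # `basestring` on Python 2
        return [files]
    return list(files)


# as_file_list('a.jpg')        -> ['a.jpg']
# as_file_list(['a.jpg', 'b']) -> ['a.jpg', 'b']
# as_file_list(None)           -> []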
243,063
|
krukas/Trionyx
|
trionyx/quickstart/__init__.py
|
Quickstart.create_project
|
def create_project(self, project_path):
"""
Create Trionyx project in given path
        :param str project_path: path to create the project in.
:raises FileExistsError:
"""
shutil.copytree(self.project_path, project_path)
self.update_file(project_path, 'requirements.txt', {
'trionyx_version': trionyx.__version__
})
self.update_file(project_path, 'config/local_settings.py', {
'secret_key': utils.random_string(32)
})
|
python
|
def create_project(self, project_path):
"""
Create Trionyx project in given path
        :param str project_path: path to create the project in.
:raises FileExistsError:
"""
shutil.copytree(self.project_path, project_path)
self.update_file(project_path, 'requirements.txt', {
'trionyx_version': trionyx.__version__
})
self.update_file(project_path, 'config/local_settings.py', {
'secret_key': utils.random_string(32)
})
|
[
"def",
"create_project",
"(",
"self",
",",
"project_path",
")",
":",
"shutil",
".",
"copytree",
"(",
"self",
".",
"project_path",
",",
"project_path",
")",
"self",
".",
"update_file",
"(",
"project_path",
",",
"'requirements.txt'",
",",
"{",
"'trionyx_version'",
":",
"trionyx",
".",
"__version__",
"}",
")",
"self",
".",
"update_file",
"(",
"project_path",
",",
"'config/local_settings.py'",
",",
"{",
"'secret_key'",
":",
"utils",
".",
"random_string",
"(",
"32",
")",
"}",
")"
] |
Create Trionyx project in given path
:param str project_path: path to create the project in.
:raises FileExistsError:
|
[
"Create",
"Trionyx",
"project",
"in",
"given",
"path"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/quickstart/__init__.py#L30-L45
|
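`update_file` is not included in this record, so its contract is unknown; a plausible reading, given the `{'trionyx_version': ...}` and `{'secret_key': ...}` payloads, is str.format-style placeholder substitution on a copied template file. A hypothetical stand-in under that assumption:

import os


def update_file(base_path, relative_path, context):
    """Hypothetical helper: render {placeholder} markers in a copied file.

    Assumes the template contains no other literal braces; the real
    Quickstart.update_file may behave differently.
    """
    target = os.path.join(base_path, relative_path)
    with open(target) as handle:
        content = handle.read()
    with open(target, 'w') as handle:
        handle.write(content.format(**context))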
243,064
|
krukas/Trionyx
|
trionyx/quickstart/__init__.py
|
Quickstart.create_app
|
def create_app(self, apps_path, name):
"""
Create Trionyx app in given path
        :param str apps_path: path to create the app in.
:param str name: name of app
:raises FileExistsError:
"""
app_path = os.path.join(apps_path, name.lower())
shutil.copytree(self.app_path, app_path)
self.update_file(app_path, '__init__.py', {
'name': name.lower()
})
self.update_file(app_path, 'apps.py', {
'name': name.lower(),
'verbose_name': name.capitalize()
})
|
python
|
def create_app(self, apps_path, name):
"""
Create Trionyx app in given path
        :param str apps_path: path to create the app in.
:param str name: name of app
:raises FileExistsError:
"""
app_path = os.path.join(apps_path, name.lower())
shutil.copytree(self.app_path, app_path)
self.update_file(app_path, '__init__.py', {
'name': name.lower()
})
self.update_file(app_path, 'apps.py', {
'name': name.lower(),
'verbose_name': name.capitalize()
})
|
[
"def",
"create_app",
"(",
"self",
",",
"apps_path",
",",
"name",
")",
":",
"app_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"apps_path",
",",
"name",
".",
"lower",
"(",
")",
")",
"shutil",
".",
"copytree",
"(",
"self",
".",
"app_path",
",",
"app_path",
")",
"self",
".",
"update_file",
"(",
"app_path",
",",
"'__init__.py'",
",",
"{",
"'name'",
":",
"name",
".",
"lower",
"(",
")",
"}",
")",
"self",
".",
"update_file",
"(",
"app_path",
",",
"'apps.py'",
",",
"{",
"'name'",
":",
"name",
".",
"lower",
"(",
")",
",",
"'verbose_name'",
":",
"name",
".",
"capitalize",
"(",
")",
"}",
")"
] |
Create Trionyx app in given path
:param str apps_path: path to create the app in.
:param str name: name of app
:raises FileExistsError:
|
[
"Create",
"Trionyx",
"app",
"in",
"given",
"path"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/quickstart/__init__.py#L47-L66
|
243,065
|
firstprayer/monsql
|
monsql/table.py
|
Table.find_one
|
def find_one(self, filter=None, fields=None, skip=0, sort=None):
"""
Similar to find. This method will only retrieve one row.
If no row matches, returns None
"""
result = self.find(filter=filter, fields=fields, skip=skip, limit=1, sort=sort)
if len(result) > 0:
return result[0]
else:
return None
|
python
|
def find_one(self, filter=None, fields=None, skip=0, sort=None):
"""
Similar to find. This method will only retrieve one row.
If no row matches, returns None
"""
result = self.find(filter=filter, fields=fields, skip=skip, limit=1, sort=sort)
if len(result) > 0:
return result[0]
else:
return None
|
[
"def",
"find_one",
"(",
"self",
",",
"filter",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"skip",
"=",
"0",
",",
"sort",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"find",
"(",
"filter",
"=",
"filter",
",",
"fields",
"=",
"fields",
",",
"skip",
"=",
"skip",
",",
"limit",
"=",
"1",
",",
"sort",
"=",
"sort",
")",
"if",
"len",
"(",
"result",
")",
">",
"0",
":",
"return",
"result",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] |
Similar to find. This method will only retrieve one row.
If no row matches, returns None
|
[
"Similar",
"to",
"find",
".",
"This",
"method",
"will",
"only",
"retrieve",
"one",
"row",
".",
"If",
"no",
"row",
"matches",
"returns",
"None"
] |
6285c15b574c8664046eae2edfeb548c7b173efd
|
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/table.py#L158-L167
|
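A short usage sketch for `find_one`, assuming a monsql `Table` already bound to a `users` table; the table, filter shape, and field names are illustrative, not taken from the record.

def get_user(users_table, user_id):
    """Return one matching user row, or None when nothing matches."""
    return users_table.find_one(filter={'id': user_id}, fields=['id', 'name'])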
243,066
|
gear11/pypelogs
|
pypeout/mysql_out.py
|
MySQLOut.get_existing_keys
|
def get_existing_keys(self, events):
"""Returns the list of keys from the given event source that are already in the DB"""
data = [e[self.key] for e in events]
ss = ','.join(['%s' for _ in data])
query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, ss)
cursor = self.conn.conn.cursor()
cursor.execute(query, data)
LOG.info("%s (data: %s)", query, data)
existing = [r[0] for r in cursor.fetchall()]
LOG.info("Existing IDs: %s" % existing)
return set(existing)
|
python
|
def get_existing_keys(self, events):
"""Returns the list of keys from the given event source that are already in the DB"""
data = [e[self.key] for e in events]
ss = ','.join(['%s' for _ in data])
query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, ss)
cursor = self.conn.conn.cursor()
cursor.execute(query, data)
LOG.info("%s (data: %s)", query, data)
existing = [r[0] for r in cursor.fetchall()]
LOG.info("Existing IDs: %s" % existing)
return set(existing)
|
[
"def",
"get_existing_keys",
"(",
"self",
",",
"events",
")",
":",
"data",
"=",
"[",
"e",
"[",
"self",
".",
"key",
"]",
"for",
"e",
"in",
"events",
"]",
"ss",
"=",
"','",
".",
"join",
"(",
"[",
"'%s'",
"for",
"_",
"in",
"data",
"]",
")",
"query",
"=",
"'SELECT %s FROM %s WHERE %s IN (%s)'",
"%",
"(",
"self",
".",
"key",
",",
"self",
".",
"table",
",",
"self",
".",
"key",
",",
"ss",
")",
"cursor",
"=",
"self",
".",
"conn",
".",
"conn",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"query",
",",
"data",
")",
"LOG",
".",
"info",
"(",
"\"%s (data: %s)\"",
",",
"query",
",",
"data",
")",
"existing",
"=",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
"LOG",
".",
"info",
"(",
"\"Existing IDs: %s\"",
"%",
"existing",
")",
"return",
"set",
"(",
"existing",
")"
] |
Returns the set of keys from the given event source that are already in the DB
|
[
"Returns",
"the",
"list",
"of",
"keys",
"from",
"the",
"given",
"event",
"source",
"that",
"are",
"already",
"in",
"the",
"DB"
] |
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypeout/mysql_out.py#L43-L53
|
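The IN clause above is assembled by joining one `%s` placeholder per value, so the values themselves still travel as bound parameters (only the table and column names are interpolated directly, which is why they must come from trusted config). The construction in isolation, MySQLdb-style placeholders, nothing executed:

data = ['k1', 'k2', 'k3']
ss = ','.join(['%s'] * len(data))
query = 'SELECT %s FROM %s WHERE %s IN (%s)' % ('id', 'events', 'id', ss)

print(query)  # SELECT id FROM events WHERE id IN (%s,%s,%s)
# cursor.execute(query, data)  # the driver binds the three values safely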
243,067
|
gear11/pypelogs
|
pypeout/mysql_out.py
|
MySQLOut.insert
|
def insert(self, events):
"""Constructs and executes a MySQL insert for the given events."""
if not len(events):
return
keys = sorted(events[0].keys())
ss = ','.join(['%s' for _ in keys])
query = 'INSERT INTO %s (%s) VALUES ' % (self.table, ','.join(keys))
data = []
for event in events:
query += '(%s),' % ss
data += [event[k] for k in keys]
query = query[:-1] + ';'
LOG.info("%s (data: %s)", query, data)
conn = self.conn.conn
cursor = conn.cursor()
cursor.execute(query, data)
conn.commit()
|
python
|
def insert(self, events):
"""Constructs and executes a MySQL insert for the given events."""
if not len(events):
return
keys = sorted(events[0].keys())
ss = ','.join(['%s' for _ in keys])
query = 'INSERT INTO %s (%s) VALUES ' % (self.table, ','.join(keys))
data = []
for event in events:
query += '(%s),' % ss
data += [event[k] for k in keys]
query = query[:-1] + ';'
LOG.info("%s (data: %s)", query, data)
conn = self.conn.conn
cursor = conn.cursor()
cursor.execute(query, data)
conn.commit()
|
[
"def",
"insert",
"(",
"self",
",",
"events",
")",
":",
"if",
"not",
"len",
"(",
"events",
")",
":",
"return",
"keys",
"=",
"sorted",
"(",
"events",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
"ss",
"=",
"','",
".",
"join",
"(",
"[",
"'%s'",
"for",
"_",
"in",
"keys",
"]",
")",
"query",
"=",
"'INSERT INTO %s (%s) VALUES '",
"%",
"(",
"self",
".",
"table",
",",
"','",
".",
"join",
"(",
"keys",
")",
")",
"data",
"=",
"[",
"]",
"for",
"event",
"in",
"events",
":",
"query",
"+=",
"'(%s),'",
"%",
"ss",
"data",
"+=",
"[",
"event",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"]",
"query",
"=",
"query",
"[",
":",
"-",
"1",
"]",
"+",
"';'",
"LOG",
".",
"info",
"(",
"\"%s (data: %s)\"",
",",
"query",
",",
"data",
")",
"conn",
"=",
"self",
".",
"conn",
".",
"conn",
"cursor",
"=",
"conn",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"query",
",",
"data",
")",
"conn",
".",
"commit",
"(",
")"
] |
Constructs and executes a MySQL insert for the given events.
|
[
"Constructs",
"and",
"executes",
"a",
"MySQL",
"insert",
"for",
"the",
"given",
"events",
"."
] |
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypeout/mysql_out.py#L55-L71
|
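The bulk insert builds one `(%s,...)` group per event and flattens all values into a single parameter list. A dry run of the same assembly, with a made-up table name and no database:

events = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
keys = sorted(events[0].keys())
ss = ','.join(['%s'] * len(keys))

query = 'INSERT INTO %s (%s) VALUES ' % ('metrics', ','.join(keys))
data = []
for event in events:
    query += '(%s),' % ss
    data += [event[k] for k in keys]
query = query[:-1] + ';'  # swap the trailing comma for a semicolon

print(query)  # INSERT INTO metrics (a,b) VALUES (%s,%s),(%s,%s);
print(data)   # [1, 2, 3, 4]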
243,068
|
krukas/Trionyx
|
trionyx/renderer.py
|
datetime_value_renderer
|
def datetime_value_renderer(value, **options):
"""Render datetime value with django formats, default is SHORT_DATETIME_FORMAT"""
datetime_format = options.get('datetime_format', 'SHORT_DATETIME_FORMAT')
return formats.date_format(timezone.localtime(value), datetime_format)
|
python
|
def datetime_value_renderer(value, **options):
"""Render datetime value with django formats, default is SHORT_DATETIME_FORMAT"""
datetime_format = options.get('datetime_format', 'SHORT_DATETIME_FORMAT')
return formats.date_format(timezone.localtime(value), datetime_format)
|
[
"def",
"datetime_value_renderer",
"(",
"value",
",",
"*",
"*",
"options",
")",
":",
"datetime_format",
"=",
"options",
".",
"get",
"(",
"'datetime_format'",
",",
"'SHORT_DATETIME_FORMAT'",
")",
"return",
"formats",
".",
"date_format",
"(",
"timezone",
".",
"localtime",
"(",
"value",
")",
",",
"datetime_format",
")"
] |
Render datetime value with django formats, default is SHORT_DATETIME_FORMAT
|
[
"Render",
"datetime",
"value",
"with",
"django",
"formats",
"default",
"is",
"SHORT_DATETIME_FORMAT"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/renderer.py#L21-L24
|
243,069
|
krukas/Trionyx
|
trionyx/renderer.py
|
price_value_renderer
|
def price_value_renderer(value, currency=None, **options):
"""Format price value, with current locale and CURRENCY in settings"""
if not currency:
currency = getattr(settings, 'CURRENCY', 'USD')
return format_currency(value, currency, locale=utils.get_current_locale())
|
python
|
def price_value_renderer(value, currency=None, **options):
"""Format price value, with current locale and CURRENCY in settings"""
if not currency:
currency = getattr(settings, 'CURRENCY', 'USD')
return format_currency(value, currency, locale=utils.get_current_locale())
|
[
"def",
"price_value_renderer",
"(",
"value",
",",
"currency",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"not",
"currency",
":",
"currency",
"=",
"getattr",
"(",
"settings",
",",
"'CURRENCY'",
",",
"'USD'",
")",
"return",
"format_currency",
"(",
"value",
",",
"currency",
",",
"locale",
"=",
"utils",
".",
"get_current_locale",
"(",
")",
")"
] |
Format price value, with current locale and CURRENCY in settings
|
[
"Format",
"price",
"value",
"with",
"current",
"locale",
"and",
"CURRENCY",
"in",
"settings"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/renderer.py#L32-L36
|
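`format_currency` here comes from Babel. A standalone check of the same call, with locales hard-coded in place of `utils.get_current_locale()`; exact output can vary slightly with the installed CLDR data.

from babel.numbers import format_currency

print(format_currency(1099.5, 'USD', locale='en_US'))  # $1,099.50
print(format_currency(1099.5, 'EUR', locale='de_DE'))  # 1.099,50 €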
243,070
|
ThreshingFloor/libtf
|
libtf/logparsers/tf_log_base.py
|
TFLogBase.reduce
|
def reduce(self, show_noisy=False):
"""
Yield the reduced log lines
        :param show_noisy: If this is false (the default), yields the reduced log file. If this is true,
        it yields the noisy lines that were filtered out.
"""
if not show_noisy:
for log in self.quiet_logs:
yield log['raw'].strip()
else:
for log in self.noisy_logs:
yield log['raw'].strip()
|
python
|
def reduce(self, show_noisy=False):
"""
Yield the reduced log lines
        :param show_noisy: If this is false (the default), yields the reduced log file. If this is true,
        it yields the noisy lines that were filtered out.
"""
if not show_noisy:
for log in self.quiet_logs:
yield log['raw'].strip()
else:
for log in self.noisy_logs:
yield log['raw'].strip()
|
[
"def",
"reduce",
"(",
"self",
",",
"show_noisy",
"=",
"False",
")",
":",
"if",
"not",
"show_noisy",
":",
"for",
"log",
"in",
"self",
".",
"quiet_logs",
":",
"yield",
"log",
"[",
"'raw'",
"]",
".",
"strip",
"(",
")",
"else",
":",
"for",
"log",
"in",
"self",
".",
"noisy_logs",
":",
"yield",
"log",
"[",
"'raw'",
"]",
".",
"strip",
"(",
")"
] |
Yield the reduced log lines
:param show_noisy: If this is false (the default), yields the reduced log file. If this is true,
it yields the noisy lines that were filtered out.
|
[
"Yield",
"the",
"reduced",
"log",
"lines"
] |
f1a8710f750639c9b9e2a468ece0d2923bf8c3df
|
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_log_base.py#L80-L92
|
243,071
|
ThreshingFloor/libtf
|
libtf/logparsers/tf_log_base.py
|
TFLogBase._get_filter
|
def _get_filter(self, features):
"""
Gets the filter for the features in the object
:param features: The features of the syslog file
"""
# This chops the features up into smaller lists so the api can handle them
for ip_batch in (features['ips'][pos:pos + self.ip_query_batch_size]
for pos in six.moves.range(0, len(features['ips']), self.ip_query_batch_size)):
# Query for each chunk and add it to the filter list
query = {'ips': ip_batch, 'ports': features['ports']}
self.filter['ips'] += self._send_features(query)['ips']
|
python
|
def _get_filter(self, features):
"""
Gets the filter for the features in the object
:param features: The features of the syslog file
"""
# This chops the features up into smaller lists so the api can handle them
for ip_batch in (features['ips'][pos:pos + self.ip_query_batch_size]
for pos in six.moves.range(0, len(features['ips']), self.ip_query_batch_size)):
# Query for each chunk and add it to the filter list
query = {'ips': ip_batch, 'ports': features['ports']}
self.filter['ips'] += self._send_features(query)['ips']
|
[
"def",
"_get_filter",
"(",
"self",
",",
"features",
")",
":",
"# This chops the features up into smaller lists so the api can handle them",
"for",
"ip_batch",
"in",
"(",
"features",
"[",
"'ips'",
"]",
"[",
"pos",
":",
"pos",
"+",
"self",
".",
"ip_query_batch_size",
"]",
"for",
"pos",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"0",
",",
"len",
"(",
"features",
"[",
"'ips'",
"]",
")",
",",
"self",
".",
"ip_query_batch_size",
")",
")",
":",
"# Query for each chunk and add it to the filter list",
"query",
"=",
"{",
"'ips'",
":",
"ip_batch",
",",
"'ports'",
":",
"features",
"[",
"'ports'",
"]",
"}",
"self",
".",
"filter",
"[",
"'ips'",
"]",
"+=",
"self",
".",
"_send_features",
"(",
"query",
")",
"[",
"'ips'",
"]"
] |
Gets the filter for the features in the object
:param features: The features of the syslog file
|
[
"Gets",
"the",
"filter",
"for",
"the",
"features",
"in",
"the",
"object"
] |
f1a8710f750639c9b9e2a468ece0d2923bf8c3df
|
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_log_base.py#L94-L106
|
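The batching expression in `_get_filter` is a standard list-chunking idiom. Pulled out as a reusable generator, with an illustrative batch size:

def chunks(items, size):
    """Yield successive `size`-sized slices of a list."""
    for pos in range(0, len(items), size):
        yield items[pos:pos + size]


ips = ['10.0.0.%d' % i for i in range(7)]
for batch in chunks(ips, 3):
    print(batch)
# ['10.0.0.0', '10.0.0.1', '10.0.0.2']
# ['10.0.0.3', '10.0.0.4', '10.0.0.5']
# ['10.0.0.6']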
243,072
|
ThreshingFloor/libtf
|
libtf/logparsers/tf_log_base.py
|
TFLogBase._send_features
|
def _send_features(self, features):
"""
Send a query to the backend api with a list of observed features in this log file
:param features: Features found in the log file
:return: Response text from ThreshingFloor API
"""
# Hit the auth endpoint with a list of features
try:
r = requests.post(self.base_uri + self.api_endpoint, json=features, headers={'x-api-key': self.api_key})
except requests.exceptions.ConnectionError:
raise TFAPIUnavailable("The ThreshingFloor API appears to be unavailable.")
if r.status_code != 200:
sys.stderr.write("%s\n" % r.text)
raise TFAPIUnavailable("Request failed and returned a status of: {STATUS_CODE}"
.format(STATUS_CODE=r.status_code))
return json.loads(r.text)
|
python
|
def _send_features(self, features):
"""
Send a query to the backend api with a list of observed features in this log file
:param features: Features found in the log file
:return: Response text from ThreshingFloor API
"""
# Hit the auth endpoint with a list of features
try:
r = requests.post(self.base_uri + self.api_endpoint, json=features, headers={'x-api-key': self.api_key})
except requests.exceptions.ConnectionError:
raise TFAPIUnavailable("The ThreshingFloor API appears to be unavailable.")
if r.status_code != 200:
sys.stderr.write("%s\n" % r.text)
raise TFAPIUnavailable("Request failed and returned a status of: {STATUS_CODE}"
.format(STATUS_CODE=r.status_code))
return json.loads(r.text)
|
[
"def",
"_send_features",
"(",
"self",
",",
"features",
")",
":",
"# Hit the auth endpoint with a list of features",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"base_uri",
"+",
"self",
".",
"api_endpoint",
",",
"json",
"=",
"features",
",",
"headers",
"=",
"{",
"'x-api-key'",
":",
"self",
".",
"api_key",
"}",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
":",
"raise",
"TFAPIUnavailable",
"(",
"\"The ThreshingFloor API appears to be unavailable.\"",
")",
"if",
"r",
".",
"status_code",
"!=",
"200",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"r",
".",
"text",
")",
"raise",
"TFAPIUnavailable",
"(",
"\"Request failed and returned a status of: {STATUS_CODE}\"",
".",
"format",
"(",
"STATUS_CODE",
"=",
"r",
".",
"status_code",
")",
")",
"return",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")"
] |
Send a query to the backend api with a list of observed features in this log file
:param features: Features found in the log file
:return: Response text from ThreshingFloor API
|
[
"Send",
"a",
"query",
"to",
"the",
"backend",
"api",
"with",
"a",
"list",
"of",
"observed",
"features",
"in",
"this",
"log",
"file"
] |
f1a8710f750639c9b9e2a468ece0d2923bf8c3df
|
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_log_base.py#L108-L127
|
243,073
|
crcresearch/py-utils
|
crc_nd/utils/file_io.py
|
clean_out_dir
|
def clean_out_dir(directory):
"""
Delete all the files and subdirectories in a directory.
"""
if not isinstance(directory, path):
directory = path(directory)
for file_path in directory.files():
file_path.remove()
for dir_path in directory.dirs():
dir_path.rmtree()
|
python
|
def clean_out_dir(directory):
"""
Delete all the files and subdirectories in a directory.
"""
if not isinstance(directory, path):
directory = path(directory)
for file_path in directory.files():
file_path.remove()
for dir_path in directory.dirs():
dir_path.rmtree()
|
[
"def",
"clean_out_dir",
"(",
"directory",
")",
":",
"if",
"not",
"isinstance",
"(",
"directory",
",",
"path",
")",
":",
"directory",
"=",
"path",
"(",
"directory",
")",
"for",
"file_path",
"in",
"directory",
".",
"files",
"(",
")",
":",
"file_path",
".",
"remove",
"(",
")",
"for",
"dir_path",
"in",
"directory",
".",
"dirs",
"(",
")",
":",
"dir_path",
".",
"rmtree",
"(",
")"
] |
Delete all the files and subdirectories in a directory.
|
[
"Delete",
"all",
"the",
"files",
"and",
"subdirectories",
"in",
"a",
"directory",
"."
] |
04caf0425a047baf900da726cf47c42413b0dd81
|
https://github.com/crcresearch/py-utils/blob/04caf0425a047baf900da726cf47c42413b0dd81/crc_nd/utils/file_io.py#L14-L23
|
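`clean_out_dir` relies on the third-party path.py API (`.files()`, `.dirs()`, `.rmtree()`). For comparison, a stdlib-only equivalent of the same operation using pathlib and shutil:

import shutil
from pathlib import Path


def clean_out_dir(directory):
    """Delete every file and subdirectory inside `directory`, keeping it."""
    for entry in Path(directory).iterdir():
        if entry.is_dir():
            shutil.rmtree(entry)
        else:
            entry.unlink()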
243,074
|
davidwtbuxton/appengine.py
|
appengine.py
|
_extract_zip
|
def _extract_zip(archive, dest=None, members=None):
"""Extract the ZipInfo object to a real file on the path targetpath."""
# Python 2.5 compatibility.
dest = dest or os.getcwd()
members = members or archive.infolist()
for member in members:
if isinstance(member, basestring):
member = archive.getinfo(member)
_extract_zip_member(archive, member, dest)
|
python
|
def _extract_zip(archive, dest=None, members=None):
"""Extract the ZipInfo object to a real file on the path targetpath."""
# Python 2.5 compatibility.
dest = dest or os.getcwd()
members = members or archive.infolist()
for member in members:
if isinstance(member, basestring):
member = archive.getinfo(member)
_extract_zip_member(archive, member, dest)
|
[
"def",
"_extract_zip",
"(",
"archive",
",",
"dest",
"=",
"None",
",",
"members",
"=",
"None",
")",
":",
"# Python 2.5 compatibility.",
"dest",
"=",
"dest",
"or",
"os",
".",
"getcwd",
"(",
")",
"members",
"=",
"members",
"or",
"archive",
".",
"infolist",
"(",
")",
"for",
"member",
"in",
"members",
":",
"if",
"isinstance",
"(",
"member",
",",
"basestring",
")",
":",
"member",
"=",
"archive",
".",
"getinfo",
"(",
"member",
")",
"_extract_zip_member",
"(",
"archive",
",",
"member",
",",
"dest",
")"
] |
Extract the ZipInfo object to a real file on the path targetpath.
|
[
"Extract",
"the",
"ZipInfo",
"object",
"to",
"a",
"real",
"file",
"on",
"the",
"path",
"targetpath",
"."
] |
9ef666c282406b3d8799bfa4c70f99870b21a236
|
https://github.com/davidwtbuxton/appengine.py/blob/9ef666c282406b3d8799bfa4c70f99870b21a236/appengine.py#L23-L33
|
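The record's comment flags this as a Python 2.5 backport; on 2.6+ the stdlib zipfile module covers both the whole-archive and selected-members cases directly. A minimal modern equivalent:

import zipfile


def extract_zip(archive_path, dest=None, members=None):
    """Extract `members` (or everything) from a zip archive into `dest`."""
    with zipfile.ZipFile(archive_path) as archive:
        archive.extractall(path=dest, members=members)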
243,075
|
davidwtbuxton/appengine.py
|
appengine.py
|
make_parser
|
def make_parser():
"""Returns a new option parser."""
p = optparse.OptionParser()
p.add_option('--prefix', metavar='DIR', help='install SDK in DIR')
p.add_option('--bindir', metavar='DIR', help='install tools in DIR')
    p.add_option('--force', action='store_true', default=False,
            help='overwrite existing installation')
    p.add_option('--no-bindir', action='store_true', default=False,
            help='do not install tools in DIR')
return p
|
python
|
def make_parser():
"""Returns a new option parser."""
p = optparse.OptionParser()
p.add_option('--prefix', metavar='DIR', help='install SDK in DIR')
p.add_option('--bindir', metavar='DIR', help='install tools in DIR')
    p.add_option('--force', action='store_true', default=False,
            help='overwrite existing installation')
    p.add_option('--no-bindir', action='store_true', default=False,
            help='do not install tools in DIR')
return p
|
[
"def",
"make_parser",
"(",
")",
":",
"p",
"=",
"optparse",
".",
"OptionParser",
"(",
")",
"p",
".",
"add_option",
"(",
"'--prefix'",
",",
"metavar",
"=",
"'DIR'",
",",
"help",
"=",
"'install SDK in DIR'",
")",
"p",
".",
"add_option",
"(",
"'--bindir'",
",",
"metavar",
"=",
"'DIR'",
",",
"help",
"=",
"'install tools in DIR'",
")",
"p",
".",
"add_option",
"(",
"'--force'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'over-write existing installation'",
")",
"p",
".",
"add_option",
"(",
"'--no-bindir'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'do not install tools on DIR'",
")",
"return",
"p"
] |
Returns a new option parser.
|
[
"Returns",
"a",
"new",
"option",
"parser",
"."
] |
9ef666c282406b3d8799bfa4c70f99870b21a236
|
https://github.com/davidwtbuxton/appengine.py/blob/9ef666c282406b3d8799bfa4c70f99870b21a236/appengine.py#L60-L70
|
243,076
|
davidwtbuxton/appengine.py
|
appengine.py
|
check_version
|
def check_version(url=VERSION_URL):
"""Returns the version string for the latest SDK."""
for line in get(url):
if 'release:' in line:
return line.split(':')[-1].strip(' \'"\r\n')
|
python
|
def check_version(url=VERSION_URL):
"""Returns the version string for the latest SDK."""
for line in get(url):
if 'release:' in line:
return line.split(':')[-1].strip(' \'"\r\n')
|
[
"def",
"check_version",
"(",
"url",
"=",
"VERSION_URL",
")",
":",
"for",
"line",
"in",
"get",
"(",
"url",
")",
":",
"if",
"'release:'",
"in",
"line",
":",
"return",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
"' \\'\"\\r\\n'",
")"
] |
Returns the version string for the latest SDK.
|
[
"Returns",
"the",
"version",
"string",
"for",
"the",
"latest",
"SDK",
"."
] |
9ef666c282406b3d8799bfa4c70f99870b21a236
|
https://github.com/davidwtbuxton/appengine.py/blob/9ef666c282406b3d8799bfa4c70f99870b21a236/appengine.py#L92-L96
|
243,077
|
davidwtbuxton/appengine.py
|
appengine.py
|
parse_sdk_name
|
def parse_sdk_name(name):
"""Returns a filename or URL for the SDK name.
The name can be a version string, a remote URL or a local path.
"""
# Version like x.y.z, return as-is.
if all(part.isdigit() for part in name.split('.', 2)):
return DOWNLOAD_URL % name
# A network location.
url = urlparse.urlparse(name)
if url.scheme:
return name
# Else must be a filename.
return os.path.abspath(name)
|
python
|
def parse_sdk_name(name):
"""Returns a filename or URL for the SDK name.
The name can be a version string, a remote URL or a local path.
"""
# Version like x.y.z, return as-is.
if all(part.isdigit() for part in name.split('.', 2)):
return DOWNLOAD_URL % name
# A network location.
url = urlparse.urlparse(name)
if url.scheme:
return name
# Else must be a filename.
return os.path.abspath(name)
|
[
"def",
"parse_sdk_name",
"(",
"name",
")",
":",
"# Version like x.y.z, return as-is.",
"if",
"all",
"(",
"part",
".",
"isdigit",
"(",
")",
"for",
"part",
"in",
"name",
".",
"split",
"(",
"'.'",
",",
"2",
")",
")",
":",
"return",
"DOWNLOAD_URL",
"%",
"name",
"# A network location.",
"url",
"=",
"urlparse",
".",
"urlparse",
"(",
"name",
")",
"if",
"url",
".",
"scheme",
":",
"return",
"name",
"# Else must be a filename.",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"name",
")"
] |
Returns a filename or URL for the SDK name.
The name can be a version string, a remote URL or a local path.
|
[
"Returns",
"a",
"filename",
"or",
"URL",
"for",
"the",
"SDK",
"name",
"."
] |
9ef666c282406b3d8799bfa4c70f99870b21a236
|
https://github.com/davidwtbuxton/appengine.py/blob/9ef666c282406b3d8799bfa4c70f99870b21a236/appengine.py#L99-L114
|
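The three accepted input forms, sketched with a hypothetical DOWNLOAD_URL; the real constant lives in the same module:

# DOWNLOAD_URL here is a stand-in for the module's constant.
DOWNLOAD_URL = 'https://example.com/google_appengine_%s.zip'
parse_sdk_name('1.9.40')                # all-digit parts -> DOWNLOAD_URL % '1.9.40'
parse_sdk_name('https://host/sdk.zip')  # has a URL scheme -> returned unchanged
parse_sdk_name('downloads/sdk.zip')     # anything else -> os.path.abspath(...)
|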
243,078
|
davidwtbuxton/appengine.py
|
appengine.py
|
open_sdk
|
def open_sdk(url):
"""Open the SDK from the URL, which can be either a network location or
a filename path. Returns a file-like object open for reading.
"""
if urlparse.urlparse(url).scheme:
return _download(url)
else:
return open(url)
|
python
|
def open_sdk(url):
"""Open the SDK from the URL, which can be either a network location or
a filename path. Returns a file-like object open for reading.
"""
if urlparse.urlparse(url).scheme:
return _download(url)
else:
return open(url)
|
[
"def",
"open_sdk",
"(",
"url",
")",
":",
"if",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
".",
"scheme",
":",
"return",
"_download",
"(",
"url",
")",
"else",
":",
"return",
"open",
"(",
"url",
")"
] |
Open the SDK from the URL, which can be either a network location or
a filename path. Returns a file-like object open for reading.
|
[
"Open",
"the",
"SDK",
"from",
"the",
"URL",
"which",
"can",
"be",
"either",
"a",
"network",
"location",
"or",
"a",
"filename",
"path",
".",
"Returns",
"a",
"file",
"-",
"like",
"object",
"open",
"for",
"reading",
"."
] |
9ef666c282406b3d8799bfa4c70f99870b21a236
|
https://github.com/davidwtbuxton/appengine.py/blob/9ef666c282406b3d8799bfa4c70f99870b21a236/appengine.py#L117-L124
|
243,079
|
bcho/bearychat-py
|
bearychat/incoming.py
|
Incoming.reset
|
def reset(self):
'''Reset stream.'''
self._text = None
self._markdown = False
self._channel = Incoming.DEFAULT_CHANNEL
self._attachments = []
return self
|
python
|
def reset(self):
'''Reset stream.'''
self._text = None
self._markdown = False
self._channel = Incoming.DEFAULT_CHANNEL
self._attachments = []
return self
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_text",
"=",
"None",
"self",
".",
"_markdown",
"=",
"False",
"self",
".",
"_channel",
"=",
"Incoming",
".",
"DEFAULT_CHANNEL",
"self",
".",
"_attachments",
"=",
"[",
"]",
"return",
"self"
] |
Reset stream.
|
[
"Reset",
"stream",
"."
] |
d492595d6334dfba511f82770995160ee12b5de1
|
https://github.com/bcho/bearychat-py/blob/d492595d6334dfba511f82770995160ee12b5de1/bearychat/incoming.py#L43-L50
|
243,080
|
bcho/bearychat-py
|
bearychat/incoming.py
|
Incoming.with_text
|
def with_text(self, text, markdown=None):
'''Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``.
'''
self._text = text
self._markdown = markdown or False
return self
|
python
|
def with_text(self, text, markdown=None):
'''Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``.
'''
self._text = text
self._markdown = markdown or False
return self
|
[
"def",
"with_text",
"(",
"self",
",",
"text",
",",
"markdown",
"=",
"None",
")",
":",
"self",
".",
"_text",
"=",
"text",
"self",
".",
"_markdown",
"=",
"markdown",
"or",
"False",
"return",
"self"
] |
Set text content.
:param text: text content.
:param markdown: is markdown? Defaults to ``False``.
|
[
"Set",
"text",
"content",
"."
] |
d492595d6334dfba511f82770995160ee12b5de1
|
https://github.com/bcho/bearychat-py/blob/d492595d6334dfba511f82770995160ee12b5de1/bearychat/incoming.py#L61-L70
|
243,081
|
bcho/bearychat-py
|
bearychat/incoming.py
|
Incoming.push
|
def push(self):
'''Deliver the message.'''
message = self.build_message()
return requests.post(self.hook, json=message)
|
python
|
def push(self):
'''Deliver the message.'''
message = self.build_message()
return requests.post(self.hook, json=message)
|
[
"def",
"push",
"(",
"self",
")",
":",
"message",
"=",
"self",
".",
"build_message",
"(",
")",
"return",
"requests",
".",
"post",
"(",
"self",
".",
"hook",
",",
"json",
"=",
"message",
")"
] |
Deliver the message.
|
[
"Deliver",
"the",
"message",
"."
] |
d492595d6334dfba511f82770995160ee12b5de1
|
https://github.com/bcho/bearychat-py/blob/d492595d6334dfba511f82770995160ee12b5de1/bearychat/incoming.py#L113-L116
|
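reset and with_text return self, so the three methods above chain; a hedged usage sketch, assuming the Incoming constructor takes the webhook URL that push reads from self.hook:

incoming = Incoming('https://hook.example.com/incoming')  # constructor signature assumed
response = incoming.reset().with_text('**deploy finished**', markdown=True).push()
# push() POSTs the built message with requests and returns the Response.
|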
243,082
|
luismasuelli/python-cantrips
|
cantrips/patterns/identify.py
|
List.create
|
def create(self, key, *args, **kwargs):
"""
Creates and inserts an identified object with the passed params
using the specified class.
"""
instance = self._class(key, *args, **kwargs)
self._events.create.trigger(list=self, instance=instance, key=key, args=args, kwargs=kwargs)
return self.insert(instance)
|
python
|
def create(self, key, *args, **kwargs):
"""
Creates and inserts an identified object with the passed params
using the specified class.
"""
instance = self._class(key, *args, **kwargs)
self._events.create.trigger(list=self, instance=instance, key=key, args=args, kwargs=kwargs)
return self.insert(instance)
|
[
"def",
"create",
"(",
"self",
",",
"key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"instance",
"=",
"self",
".",
"_class",
"(",
"key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_events",
".",
"create",
".",
"trigger",
"(",
"list",
"=",
"self",
",",
"instance",
"=",
"instance",
",",
"key",
"=",
"key",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"return",
"self",
".",
"insert",
"(",
"instance",
")"
] |
Creates and inserts an identified object with the passed params
using the specified class.
|
[
"Creates",
"and",
"inserts",
"an",
"identified",
"object",
"with",
"the",
"passed",
"params",
"using",
"the",
"specified",
"class",
"."
] |
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
|
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/identify.py#L53-L60
|
243,083
|
luismasuelli/python-cantrips
|
cantrips/patterns/identify.py
|
List.insert
|
def insert(self, identified):
"""
Inserts an already-created identified object of the expected class.
"""
if not isinstance(identified, self._class):
raise self.Error("Passed instance is not of the needed class",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
try:
if self._objects[identified.key] != identified:
raise self.Error("Passes instance's key '%s' is already occupied" % identified.key,
self.Error.KEY_EXISTS, key=identified.key, instance=identified)
except KeyError:
self._objects[identified.key] = identified
self._events.insert.trigger(list=self, instance=identified)
return identified
|
python
|
def insert(self, identified):
"""
Inserts an already-created identified object of the expected class.
"""
if not isinstance(identified, self._class):
raise self.Error("Passed instance is not of the needed class",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
try:
if self._objects[identified.key] != identified:
raise self.Error("Passes instance's key '%s' is already occupied" % identified.key,
self.Error.KEY_EXISTS, key=identified.key, instance=identified)
except KeyError:
self._objects[identified.key] = identified
self._events.insert.trigger(list=self, instance=identified)
return identified
|
[
"def",
"insert",
"(",
"self",
",",
"identified",
")",
":",
"if",
"not",
"isinstance",
"(",
"identified",
",",
"self",
".",
"_class",
")",
":",
"raise",
"self",
".",
"Error",
"(",
"\"Passed instance is not of the needed class\"",
",",
"self",
".",
"Error",
".",
"INVALID_INSTANCE_CLASS",
",",
"instance",
"=",
"identified",
")",
"try",
":",
"if",
"self",
".",
"_objects",
"[",
"identified",
".",
"key",
"]",
"!=",
"identified",
":",
"raise",
"self",
".",
"Error",
"(",
"\"Passes instance's key '%s' is already occupied\"",
"%",
"identified",
".",
"key",
",",
"self",
".",
"Error",
".",
"KEY_EXISTS",
",",
"key",
"=",
"identified",
".",
"key",
",",
"instance",
"=",
"identified",
")",
"except",
"KeyError",
":",
"self",
".",
"_objects",
"[",
"identified",
".",
"key",
"]",
"=",
"identified",
"self",
".",
"_events",
".",
"insert",
".",
"trigger",
"(",
"list",
"=",
"self",
",",
"instance",
"=",
"identified",
")",
"return",
"identified"
] |
Inserts an already-created identified object of the expected class.
|
[
"Inserts",
"an",
"already",
"-",
"created",
"identified",
"object",
"of",
"the",
"expected",
"class",
"."
] |
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
|
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/identify.py#L62-L78
|
243,084
|
luismasuelli/python-cantrips
|
cantrips/patterns/identify.py
|
List.remove
|
def remove(self, identified):
"""
Removes an already-created identified object.
A key may be passed instead of an identified object.
If an object is passed, and its key is held by another
object inside the record, an error is triggered.
Returns the removed object.
"""
by_val = isinstance(identified, Identified)
if by_val:
key = identified.key
if not isinstance(identified, self._class):
raise self.Error("Such instance could never exist here",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
else:
key = identified
try:
popped = self._objects.pop(key)
if by_val and popped != identified:
raise self.Error("Trying to pop a different object which also has key '%s'" % popped.key,
self.Error.NOT_SAME_OBJECT, instance=identified, current=popped)
self._events.remove.trigger(list=self, instance=identified, by_val=by_val)
except KeyError:
raise self.Error("No object with key '%s' exists here",
self.Error.KEY_NOT_EXISTS, key=key, instance=identified if by_val else None)
|
python
|
def remove(self, identified):
"""
Removes an already-created identified object.
A key may be passed instead of an identified object.
If an object is passed, and its key is held by another
object inside the record, an error is triggered.
Returns the removed object.
"""
by_val = isinstance(identified, Identified)
if by_val:
key = identified.key
if not isinstance(identified, self._class):
raise self.Error("Such instance could never exist here",
self.Error.INVALID_INSTANCE_CLASS, instance=identified)
else:
key = identified
try:
popped = self._objects.pop(key)
if by_val and popped != identified:
raise self.Error("Trying to pop a different object which also has key '%s'" % popped.key,
self.Error.NOT_SAME_OBJECT, instance=identified, current=popped)
self._events.remove.trigger(list=self, instance=identified, by_val=by_val)
except KeyError:
raise self.Error("No object with key '%s' exists here",
self.Error.KEY_NOT_EXISTS, key=key, instance=identified if by_val else None)
|
[
"def",
"remove",
"(",
"self",
",",
"identified",
")",
":",
"by_val",
"=",
"isinstance",
"(",
"identified",
",",
"Identified",
")",
"if",
"by_val",
":",
"key",
"=",
"identified",
".",
"key",
"if",
"not",
"isinstance",
"(",
"identified",
",",
"self",
".",
"_class",
")",
":",
"raise",
"self",
".",
"Error",
"(",
"\"Such instance could never exist here\"",
",",
"self",
".",
"Error",
".",
"INVALID_INSTANCE_CLASS",
",",
"instance",
"=",
"identified",
")",
"else",
":",
"key",
"=",
"identified",
"try",
":",
"popped",
"=",
"self",
".",
"_objects",
".",
"pop",
"(",
"key",
")",
"if",
"by_val",
"and",
"popped",
"!=",
"identified",
":",
"raise",
"self",
".",
"Error",
"(",
"\"Trying to pop a different object which also has key '%s'\"",
"%",
"popped",
".",
"key",
",",
"self",
".",
"Error",
".",
"NOT_SAME_OBJECT",
",",
"instance",
"=",
"identified",
",",
"current",
"=",
"popped",
")",
"self",
".",
"_events",
".",
"remove",
".",
"trigger",
"(",
"list",
"=",
"self",
",",
"instance",
"=",
"identified",
",",
"by_val",
"=",
"by_val",
")",
"except",
"KeyError",
":",
"raise",
"self",
".",
"Error",
"(",
"\"No object with key '%s' exists here\"",
",",
"self",
".",
"Error",
".",
"KEY_NOT_EXISTS",
",",
"key",
"=",
"key",
",",
"instance",
"=",
"identified",
"if",
"by_val",
"else",
"None",
")"
] |
Removes an already-created identified object.
A key may be passed instead of an identified object.
If an object is passed, and its key is held by another
object inside the record, an error is triggered.
Returns the removed object.
|
[
"Removes",
"an",
"already",
"-",
"created",
"identified",
"object",
".",
"A",
"key",
"may",
"be",
"passed",
"instead",
"of",
"an",
"identified",
"object",
".",
"If",
"an",
"object",
"is",
"passed",
"and",
"its",
"key",
"is",
"held",
"by",
"another",
"object",
"inside",
"the",
"record",
"an",
"error",
"is",
"triggered",
".",
"Returns",
"the",
"removed",
"object",
"."
] |
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
|
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/identify.py#L80-L106
|
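A hedged sketch of the key-or-instance convention shared by insert and remove, assuming users is an already-constructed List bound to some Identified subclass (setup elided):

user = users.create('alice', name='Alice')  # create() builds, triggers events, inserts
users.remove('alice')                       # remove by key...
users.insert(user)
users.remove(user)                          # ...or by the instance itself; a key held
                                            # by a different object raises NOT_SAME_OBJECT
|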
243,085
|
pydron/twistit
|
twistit/_timeout.py
|
timeout_deferred
|
def timeout_deferred(deferred, timeout, error_message="Timeout occured"):
"""
Waits a given time, if the given deferred hasn't called back
by then we cancel it. If the deferred was cancelled by the timeout,
a `TimeoutError` error is produced.
Returns `deferred`.
"""
timeout_occured = [False]
def got_result(result):
if not timeout_occured[0]:
# Deferred called back before the timeout.
delayedCall.cancel()
return result
else:
if isinstance(result, failure.Failure) and result.check(defer.CancelledError):
# Got a `CancelledError` after we called `cancel()`.
# Replace it with a `TimeoutError`.
raise TimeoutError(error_message)
else:
# Apparently the given deferred has something else to tell us.
# It might be that it completed before the `cancel()` had an effect
# (or as a result thereof). It might also be that it triggered an
# error. In either case, we want this to be visible down-stream.
return result
def time_is_up():
timeout_occured[0] = True
deferred.cancel()
delayedCall = reactor.callLater(timeout, time_is_up)
deferred.addBoth(got_result)
return deferred
|
python
|
def timeout_deferred(deferred, timeout, error_message="Timeout occured"):
"""
Waits a given time, if the given deferred hasn't called back
by then we cancel it. If the deferred was cancelled by the timeout,
a `TimeoutError` error is produced.
Returns `deferred`.
"""
timeout_occured = [False]
def got_result(result):
if not timeout_occured[0]:
# Deferred called back before the timeout.
delayedCall.cancel()
return result
else:
if isinstance(result, failure.Failure) and result.check(defer.CancelledError):
# Got a `CancelledError` after we called `cancel()`.
# Replace it with a `TimeoutError`.
raise TimeoutError(error_message)
else:
# Apparently the given deferred has something else to tell us.
# It might be that it completed before the `cancel()` had an effect
# (or as a result thereof). It might also be that it triggered an
# error. In either case, we want this to be visible down-stream.
return result
def time_is_up():
timeout_occured[0] = True
deferred.cancel()
delayedCall = reactor.callLater(timeout, time_is_up)
deferred.addBoth(got_result)
return deferred
|
[
"def",
"timeout_deferred",
"(",
"deferred",
",",
"timeout",
",",
"error_message",
"=",
"\"Timeout occured\"",
")",
":",
"timeout_occured",
"=",
"[",
"False",
"]",
"def",
"got_result",
"(",
"result",
")",
":",
"if",
"not",
"timeout_occured",
"[",
"0",
"]",
":",
"# Deferred called back before the timeout.",
"delayedCall",
".",
"cancel",
"(",
")",
"return",
"result",
"else",
":",
"if",
"isinstance",
"(",
"result",
",",
"failure",
".",
"Failure",
")",
"and",
"result",
".",
"check",
"(",
"defer",
".",
"CancelledError",
")",
":",
"# Got a `CancelledError` after we called `cancel()`.",
"# Replace it with a `TimeoutError`.",
"raise",
"TimeoutError",
"(",
"error_message",
")",
"else",
":",
"# Apparently the given deferred has something else to tell us.",
"# It might be that it completed before the `cancel()` had an effect",
"# (or as a result thereof). It might also be that it triggered an",
"# error. In either case, we want this to be visible down-stream.",
"return",
"result",
"def",
"time_is_up",
"(",
")",
":",
"timeout_occured",
"[",
"0",
"]",
"=",
"True",
"deferred",
".",
"cancel",
"(",
")",
"delayedCall",
"=",
"reactor",
".",
"callLater",
"(",
"timeout",
",",
"time_is_up",
")",
"deferred",
".",
"addBoth",
"(",
"got_result",
")",
"return",
"deferred"
] |
Waits a given time, if the given deferred hasn't called back
by then we cancel it. If the deferred was cancelled by the timeout,
a `TimeoutError` error is produced.
Returns `deferred`.
|
[
"Waits",
"a",
"given",
"time",
"if",
"the",
"given",
"deferred",
"hasn",
"t",
"called",
"back",
"by",
"then",
"we",
"cancel",
"it",
".",
"If",
"the",
"deferred",
"was",
"cancelled",
"by",
"the",
"timeout",
"a",
"TimeoutError",
"error",
"is",
"produced",
".",
"Returns",
"deferred",
"."
] |
23ac43b830083fd32c400fcdfa5300e302769c74
|
https://github.com/pydron/twistit/blob/23ac43b830083fd32c400fcdfa5300e302769c74/twistit/_timeout.py#L24-L58
|
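A usage sketch with Twisted, reactor assumed to be running. The deferred below never fires on its own, so after five seconds the errback sees the module's TimeoutError rather than the raw CancelledError:

from twisted.internet import defer

d = defer.Deferred()  # never fires on its own
timeout_deferred(d, 5.0, error_message='backend did not answer')

def on_timeout(failure):
    failure.trap(TimeoutError)  # CancelledError was already translated in got_result
    print('gave up waiting')

d.addErrback(on_timeout)
|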
243,086
|
bitlabstudio/django-unshorten
|
unshorten/auth.py
|
SimpleAuthentication.http_auth
|
def http_auth(self):
"""
Returns ``True`` if valid http auth credentials are found in the
request header.
"""
if 'HTTP_AUTHORIZATION' in self.request.META.keys():
authmeth, auth = self.request.META['HTTP_AUTHORIZATION'].split(
' ', 1)
if authmeth.lower() == 'basic':
auth = auth.strip().decode('base64')
identifier, password = auth.split(':', 1)
username = get_username(identifier)
user = authenticate(username=username, password=password)
if user:
login(self.request, user)
return True
|
python
|
def http_auth(self):
"""
Returns ``True`` if valid http auth credentials are found in the
request header.
"""
if 'HTTP_AUTHORIZATION' in self.request.META.keys():
authmeth, auth = self.request.META['HTTP_AUTHORIZATION'].split(
' ', 1)
if authmeth.lower() == 'basic':
auth = auth.strip().decode('base64')
identifier, password = auth.split(':', 1)
username = get_username(identifier)
user = authenticate(username=username, password=password)
if user:
login(self.request, user)
return True
|
[
"def",
"http_auth",
"(",
"self",
")",
":",
"if",
"'HTTP_AUTHORIZATION'",
"in",
"self",
".",
"request",
".",
"META",
".",
"keys",
"(",
")",
":",
"authmeth",
",",
"auth",
"=",
"self",
".",
"request",
".",
"META",
"[",
"'HTTP_AUTHORIZATION'",
"]",
".",
"split",
"(",
"' '",
",",
"1",
")",
"if",
"authmeth",
".",
"lower",
"(",
")",
"==",
"'basic'",
":",
"auth",
"=",
"auth",
".",
"strip",
"(",
")",
".",
"decode",
"(",
"'base64'",
")",
"identifier",
",",
"password",
"=",
"auth",
".",
"split",
"(",
"':'",
",",
"1",
")",
"username",
"=",
"get_username",
"(",
"identifier",
")",
"user",
"=",
"authenticate",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"if",
"user",
":",
"login",
"(",
"self",
".",
"request",
",",
"user",
")",
"return",
"True"
] |
Returns ``True`` if valid http auth credentials are found in the
request header.
|
[
"Returns",
"True",
"if",
"valid",
"http",
"auth",
"credentials",
"are",
"found",
"in",
"the",
"request",
"header",
"."
] |
6d184de908bb9df3aad5ac3fd9732d976afb6953
|
https://github.com/bitlabstudio/django-unshorten/blob/6d184de908bb9df3aad5ac3fd9732d976afb6953/unshorten/auth.py#L38-L54
|
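The header it expects is standard HTTP Basic auth; the decode('base64') call pins this to Python 2. A sketch of building a matching header for a test client (the client call is illustrative):

# Python 2 only: mirrors the decode('base64') step in reverse.
credentials = 'alice@example.com:secret'.encode('base64').strip()
header_value = 'Basic ' + credentials
# e.g. client.get('/some/url', HTTP_AUTHORIZATION=header_value)
|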
243,087
|
hactar-is/frink
|
frink/__init__.py
|
Frink.init
|
def init(self, db=RDB_DB, host=RDB_HOST, port=RDB_PORT):
"""Create the Frink object to store the connection credentials."""
self.RDB_HOST = host
self.RDB_PORT = port
self.RDB_DB = db
from .connection import RethinkDB
self.rdb = RethinkDB()
self.rdb.init()
|
python
|
def init(self, db=RDB_DB, host=RDB_HOST, port=RDB_PORT):
"""Create the Frink object to store the connection credentials."""
self.RDB_HOST = host
self.RDB_PORT = port
self.RDB_DB = db
from .connection import RethinkDB
self.rdb = RethinkDB()
self.rdb.init()
|
[
"def",
"init",
"(",
"self",
",",
"db",
"=",
"RDB_DB",
",",
"host",
"=",
"RDB_HOST",
",",
"port",
"=",
"RDB_PORT",
")",
":",
"self",
".",
"RDB_HOST",
"=",
"host",
"self",
".",
"RDB_PORT",
"=",
"port",
"self",
".",
"RDB_DB",
"=",
"db",
"from",
".",
"connection",
"import",
"RethinkDB",
"self",
".",
"rdb",
"=",
"RethinkDB",
"(",
")",
"self",
".",
"rdb",
".",
"init",
"(",
")"
] |
Create the Frink object to store the connection credentials.
|
[
"Create",
"the",
"Frink",
"object",
"to",
"store",
"the",
"connection",
"credentials",
"."
] |
0d2c11daca8ef6d4365e98914bdc0bc65478ae72
|
https://github.com/hactar-is/frink/blob/0d2c11daca8ef6d4365e98914bdc0bc65478ae72/frink/__init__.py#L17-L25
|
243,088
|
zerok/zs.bibtex
|
src/zs/bibtex/parser.py
|
normalize_value
|
def normalize_value(text):
"""
This removes newlines and multiple spaces from a string.
"""
result = text.replace('\n', ' ')
result = re.subn('[ ]{2,}', ' ', result)[0]
return result
|
python
|
def normalize_value(text):
"""
This removes newlines and multiple spaces from a string.
"""
result = text.replace('\n', ' ')
result = re.subn('[ ]{2,}', ' ', result)[0]
return result
|
[
"def",
"normalize_value",
"(",
"text",
")",
":",
"result",
"=",
"text",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"result",
"=",
"re",
".",
"subn",
"(",
"'[ ]{2,}'",
",",
"' '",
",",
"result",
")",
"[",
"0",
"]",
"return",
"result"
] |
This removes newlines and multiple spaces from a string.
|
[
"This",
"removes",
"newlines",
"and",
"multiple",
"spaces",
"from",
"a",
"string",
"."
] |
ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9
|
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L16-L22
|
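A quick illustration of the collapsing; note the regex class contains only a literal space, so runs of tabs are left alone:

assert normalize_value('Knuth,\n    Donald') == 'Knuth, Donald'
|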
243,089
|
zerok/zs.bibtex
|
src/zs/bibtex/parser.py
|
parse_field
|
def parse_field(source, loc, tokens):
"""
Returns the tokens of a field as key-value pair.
"""
name = tokens[0].lower()
value = normalize_value(tokens[2])
if name == 'author' and ' and ' in value:
value = [field.strip() for field in value.split(' and ')]
return (name, value)
|
python
|
def parse_field(source, loc, tokens):
"""
Returns the tokens of a field as key-value pair.
"""
name = tokens[0].lower()
value = normalize_value(tokens[2])
if name == 'author' and ' and ' in value:
value = [field.strip() for field in value.split(' and ')]
return (name, value)
|
[
"def",
"parse_field",
"(",
"source",
",",
"loc",
",",
"tokens",
")",
":",
"name",
"=",
"tokens",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"value",
"=",
"normalize_value",
"(",
"tokens",
"[",
"2",
"]",
")",
"if",
"name",
"==",
"'author'",
"and",
"' and '",
"in",
"value",
":",
"value",
"=",
"[",
"field",
".",
"strip",
"(",
")",
"for",
"field",
"in",
"value",
".",
"split",
"(",
"' and '",
")",
"]",
"return",
"(",
"name",
",",
"value",
")"
] |
Returns the tokens of a field as key-value pair.
|
[
"Returns",
"the",
"tokens",
"of",
"a",
"field",
"as",
"key",
"-",
"value",
"pair",
"."
] |
ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9
|
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L27-L35
|
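This is a pyparsing parse action, so source and loc go unused; calling it directly shows the author splitting. The token lists are hypothetical (tokens[1] is presumably the '=' sign in the grammar):

parse_field(None, 0, ['author', '=', 'Donald Knuth and Leslie Lamport'])
# -> ('author', ['Donald Knuth', 'Leslie Lamport'])
parse_field(None, 0, ['title', '=', 'The Art of\n  Computer Programming'])
# -> ('title', 'The Art of Computer Programming')
|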
243,090
|
zerok/zs.bibtex
|
src/zs/bibtex/parser.py
|
parse_entry
|
def parse_entry(source, loc, tokens):
"""
Converts the tokens of an entry into an Entry instance. If no applicable
type is available, an UnsupportedEntryType exception is raised.
"""
type_ = tokens[1].lower()
entry_type = structures.TypeRegistry.get_type(type_)
if entry_type is None or not issubclass(entry_type, structures.Entry):
raise exceptions.UnsupportedEntryType(
"%s is not a supported entry type" % type_
)
new_entry = entry_type()
new_entry.name = tokens[3]
for key, value in [t for t in tokens[4:-1] if t != ',']:
new_entry[key] = value
return new_entry
|
python
|
def parse_entry(source, loc, tokens):
"""
Converts the tokens of an entry into an Entry instance. If no applicable
type is available, an UnsupportedEntryType exception is raised.
"""
type_ = tokens[1].lower()
entry_type = structures.TypeRegistry.get_type(type_)
if entry_type is None or not issubclass(entry_type, structures.Entry):
raise exceptions.UnsupportedEntryType(
"%s is not a supported entry type" % type_
)
new_entry = entry_type()
new_entry.name = tokens[3]
for key, value in [t for t in tokens[4:-1] if t != ',']:
new_entry[key] = value
return new_entry
|
[
"def",
"parse_entry",
"(",
"source",
",",
"loc",
",",
"tokens",
")",
":",
"type_",
"=",
"tokens",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"entry_type",
"=",
"structures",
".",
"TypeRegistry",
".",
"get_type",
"(",
"type_",
")",
"if",
"entry_type",
"is",
"None",
"or",
"not",
"issubclass",
"(",
"entry_type",
",",
"structures",
".",
"Entry",
")",
":",
"raise",
"exceptions",
".",
"UnsupportedEntryType",
"(",
"\"%s is not a supported entry type\"",
"%",
"type_",
")",
"new_entry",
"=",
"entry_type",
"(",
")",
"new_entry",
".",
"name",
"=",
"tokens",
"[",
"3",
"]",
"for",
"key",
",",
"value",
"in",
"[",
"t",
"for",
"t",
"in",
"tokens",
"[",
"4",
":",
"-",
"1",
"]",
"if",
"t",
"!=",
"','",
"]",
":",
"new_entry",
"[",
"key",
"]",
"=",
"value",
"return",
"new_entry"
] |
Converts the tokens of an entry into an Entry instance. If no applicable
type is available, an UnsupportedEntryType exception is raised.
|
[
"Converts",
"the",
"tokens",
"of",
"an",
"entry",
"into",
"an",
"Entry",
"instance",
".",
"If",
"no",
"applicable",
"type",
"is",
"available",
"an",
"UnsupportedEntryType",
"exception",
"is",
"raised",
"."
] |
ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9
|
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L37-L52
|
243,091
|
zerok/zs.bibtex
|
src/zs/bibtex/parser.py
|
parse_bibliography
|
def parse_bibliography(source, loc, tokens):
"""
Combines the parsed entries into a Bibliography instance.
"""
bib = structures.Bibliography()
for entry in tokens:
bib.add(entry)
return bib
|
python
|
def parse_bibliography(source, loc, tokens):
"""
Combines the parsed entries into a Bibliography instance.
"""
bib = structures.Bibliography()
for entry in tokens:
bib.add(entry)
return bib
|
[
"def",
"parse_bibliography",
"(",
"source",
",",
"loc",
",",
"tokens",
")",
":",
"bib",
"=",
"structures",
".",
"Bibliography",
"(",
")",
"for",
"entry",
"in",
"tokens",
":",
"bib",
".",
"add",
"(",
"entry",
")",
"return",
"bib"
] |
Combines the parsed entries into a Bibliography instance.
|
[
"Combines",
"the",
"parsed",
"entries",
"into",
"a",
"Bibliography",
"instance",
"."
] |
ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9
|
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L54-L61
|
243,092
|
zerok/zs.bibtex
|
src/zs/bibtex/parser.py
|
parse_string
|
def parse_string(str_, validate=False):
"""
Tries to parse a given string into a Bibliography instance. If ``validate``
is passed as keyword argument and set to ``True``, the Bibliography
will be validated using the standard rules.
"""
result = pattern.parseString(str_)[0]
if validate:
result.validate()
return result
|
python
|
def parse_string(str_, validate=False):
"""
Tries to parse a given string into a Bibliography instance. If ``validate``
is passed as keyword argument and set to ``True``, the Bibliography
will be validated using the standard rules.
"""
result = pattern.parseString(str_)[0]
if validate:
result.validate()
return result
|
[
"def",
"parse_string",
"(",
"str_",
",",
"validate",
"=",
"False",
")",
":",
"result",
"=",
"pattern",
".",
"parseString",
"(",
"str_",
")",
"[",
"0",
"]",
"if",
"validate",
":",
"result",
".",
"validate",
"(",
")",
"return",
"result"
] |
Tries to parse a given string into a Bibliography instance. If ``validate``
is passed as keyword argument and set to ``True``, the Bibliography
will be validated using the standard rules.
|
[
"Tries",
"to",
"parse",
"a",
"given",
"string",
"into",
"a",
"Bibliography",
"instance",
".",
"If",
"validate",
"is",
"passed",
"as",
"keyword",
"argument",
"and",
"set",
"to",
"True",
"the",
"Bibliography",
"will",
"be",
"validated",
"using",
"the",
"standard",
"rules",
"."
] |
ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9
|
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L103-L112
|
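A hedged end-to-end sketch; the entry is illustrative and validation stays off, since required-field rules depend on the entry type:

bib = parse_string('@article{knuth1984, author = {Donald Knuth}, title = {Literate Programming}}')
entry = bib['knuth1984']  # assumes dict-style access on Bibliography
print(entry['author'])
|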
243,093
|
zerok/zs.bibtex
|
src/zs/bibtex/parser.py
|
parse_file
|
def parse_file(file_or_path, encoding='utf-8', validate=False):
"""
Tries to parse a given filepath or fileobj into a Bibliography instance. If
``validate`` is passed as keyword argument and set to ``True``, the
Bibliography will be validated using the standard rules.
"""
try:
is_string = isinstance(file_or_path, basestring)
except NameError:
is_string = isinstance(file_or_path, str)
if is_string:
with codecs.open(file_or_path, 'r', encoding) as file_:
result = pattern.parseFile(file_)[0]
else:
result = pattern.parseFile(file_or_path)[0]
if validate:
result.validate()
return result
|
python
|
def parse_file(file_or_path, encoding='utf-8', validate=False):
"""
Tries to parse a given filepath or fileobj into a Bibliography instance. If
``validate`` is passed as keyword argument and set to ``True``, the
Bibliography will be validated using the standard rules.
"""
try:
is_string = isinstance(file_or_path, basestring)
except NameError:
is_string = isinstance(file_or_path, str)
if is_string:
with codecs.open(file_or_path, 'r', encoding) as file_:
result = pattern.parseFile(file_)[0]
else:
result = pattern.parseFile(file_or_path)[0]
if validate:
result.validate()
return result
|
[
"def",
"parse_file",
"(",
"file_or_path",
",",
"encoding",
"=",
"'utf-8'",
",",
"validate",
"=",
"False",
")",
":",
"try",
":",
"is_string",
"=",
"isinstance",
"(",
"file_or_path",
",",
"basestring",
")",
"except",
"NameError",
":",
"is_string",
"=",
"isinstance",
"(",
"file_or_path",
",",
"str",
")",
"if",
"is_string",
":",
"with",
"codecs",
".",
"open",
"(",
"file_or_path",
",",
"'r'",
",",
"encoding",
")",
"as",
"file_",
":",
"result",
"=",
"pattern",
".",
"parseFile",
"(",
"file_",
")",
"[",
"0",
"]",
"else",
":",
"result",
"=",
"pattern",
".",
"parseFile",
"(",
"file_or_path",
")",
"[",
"0",
"]",
"if",
"validate",
":",
"result",
".",
"validate",
"(",
")",
"return",
"result"
] |
Tries to parse a given filepath or fileobj into a Bibliography instance. If
``validate`` is passed as keyword argument and set to ``True``, the
Bibliography will be validated using the standard rules.
|
[
"Tries",
"to",
"parse",
"a",
"given",
"filepath",
"or",
"fileobj",
"into",
"a",
"Bibliography",
"instance",
".",
"If",
"validate",
"is",
"passed",
"as",
"keyword",
"argument",
"and",
"set",
"to",
"True",
"the",
"Bibliography",
"will",
"be",
"validated",
"using",
"the",
"standard",
"rules",
"."
] |
ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9
|
https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/parser.py#L115-L132
|
243,094
|
luismasuelli/python-cantrips
|
cantrips/types/record.py
|
TrackableRecord.track_end
|
def track_end(self):
"""
Ends tracking of attributes changes.
Returns the changes that occurred to the attributes.
Only the final state of each attribute is obtained
"""
self.__tracking = False
changes = self.__changes
self.__changes = {}
return changes
|
python
|
def track_end(self):
"""
Ends tracking of attributes changes.
Returns the changes that occurred to the attributes.
Only the final state of each attribute is obtained
"""
self.__tracking = False
changes = self.__changes
self.__changes = {}
return changes
|
[
"def",
"track_end",
"(",
"self",
")",
":",
"self",
".",
"__tracking",
"=",
"False",
"changes",
"=",
"self",
".",
"__changes",
"self",
".",
"__changes",
"=",
"{",
"}",
"return",
"changes"
] |
Ends tracking of attributes changes.
Returns the changes that occurred to the attributes.
Only the final state of each attribute is obtained
|
[
"Ends",
"tracking",
"of",
"attributes",
"changes",
".",
"Returns",
"the",
"changes",
"that",
"occurred",
"to",
"the",
"attributes",
".",
"Only",
"the",
"final",
"state",
"of",
"each",
"attribute",
"is",
"obtained"
] |
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
|
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/types/record.py#L33-L42
|
243,095
|
lvh/maxims
|
maxims/creation.py
|
creationTime
|
def creationTime(item):
"""
Returns the creation time of the given item.
"""
forThisItem = _CreationTime.createdItem == item
return item.store.findUnique(_CreationTime, forThisItem).timestamp
|
python
|
def creationTime(item):
"""
Returns the creation time of the given item.
"""
forThisItem = _CreationTime.createdItem == item
return item.store.findUnique(_CreationTime, forThisItem).timestamp
|
[
"def",
"creationTime",
"(",
"item",
")",
":",
"forThisItem",
"=",
"_CreationTime",
".",
"createdItem",
"==",
"item",
"return",
"item",
".",
"store",
".",
"findUnique",
"(",
"_CreationTime",
",",
"forThisItem",
")",
".",
"timestamp"
] |
Returns the creation time of the given item.
|
[
"Returns",
"the",
"creation",
"time",
"of",
"the",
"given",
"item",
"."
] |
5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623
|
https://github.com/lvh/maxims/blob/5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623/maxims/creation.py#L21-L26
|
243,096
|
davehughes/righthook
|
righthook/views.py
|
access_key
|
def access_key(root, key, sep='.', default=None):
'''
Look up a key in a potentially nested object `root` by its `sep`-separated
path. Returns `default` if the key is not found.
Example:
access_key({'foo': {'bar': 1}}, 'foo.bar') -> 1
'''
props = key.split('.')
props.reverse()
while props and root:
prop = props.pop()
root = access(root, prop, default=default)
return root
|
python
|
def access_key(root, key, sep='.', default=None):
'''
Look up a key in a potentially nested object `root` by its `sep`-separated
path. Returns `default` if the key is not found.
Example:
access_key({'foo': {'bar': 1}}, 'foo.bar') -> 1
'''
props = key.split('.')
props.reverse()
while props and root:
prop = props.pop()
root = access(root, prop, default=default)
return root
|
[
"def",
"access_key",
"(",
"root",
",",
"key",
",",
"sep",
"=",
"'.'",
",",
"default",
"=",
"None",
")",
":",
"props",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"props",
".",
"reverse",
"(",
")",
"while",
"props",
"and",
"root",
":",
"prop",
"=",
"props",
".",
"pop",
"(",
")",
"root",
"=",
"access",
"(",
"root",
",",
"prop",
",",
"default",
"=",
"default",
")",
"return",
"root"
] |
Look up a key in a potentially nested object `root` by its `sep`-separated
path. Returns `default` if the key is not found.
Example:
access_key({'foo': {'bar': 1}}, 'foo.bar') -> 1
|
[
"Look",
"up",
"a",
"key",
"in",
"a",
"potentially",
"nested",
"object",
"root",
"by",
"its",
"sep",
"-",
"separated",
"path",
".",
"Returns",
"default",
"if",
"the",
"key",
"is",
"not",
"found",
"."
] |
d40e8dea0b6ce53b3099af8ed5cb1d4dadda9dba
|
https://github.com/davehughes/righthook/blob/d40e8dea0b6ce53b3099af8ed5cb1d4dadda9dba/righthook/views.py#L17-L30
|
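Two hedged calls, assuming the access() helper used internally returns default for missing keys. Note a quirk: the sep parameter is accepted but key.split('.') is hardcoded, so custom separators have no effect:

access_key({'foo': {'bar': 1}}, 'foo.bar')                 # -> 1
access_key({'foo': {'bar': 1}}, 'foo.baz', default='n/a')  # -> 'n/a'
|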
243,097
|
jpablo128/simplystatic
|
bin/s2.py
|
dispatch
|
def dispatch(argdict):
'''Call the command-specific function, depending on the command.'''
cmd = argdict['command']
ftc = getattr(THIS_MODULE, 'do_'+cmd)
ftc(argdict)
|
python
|
def dispatch(argdict):
'''Call the command-specific function, depending on the command.'''
cmd = argdict['command']
ftc = getattr(THIS_MODULE, 'do_'+cmd)
ftc(argdict)
|
[
"def",
"dispatch",
"(",
"argdict",
")",
":",
"cmd",
"=",
"argdict",
"[",
"'command'",
"]",
"ftc",
"=",
"getattr",
"(",
"THIS_MODULE",
",",
"'do_'",
"+",
"cmd",
")",
"ftc",
"(",
"argdict",
")"
] |
Call the command-specific function, depending on the command.
|
[
"Call",
"the",
"command",
"-",
"specific",
"function",
"depending",
"on",
"the",
"command",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/bin/s2.py#L116-L120
|
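The getattr-based dispatch pattern in isolation; a self-contained sketch with a stand-in for THIS_MODULE:

import sys

def do_build(argdict):
    print('building %s' % argdict['target'])

THIS_MODULE = sys.modules[__name__]  # stand-in for the module-level constant

def dispatch(argdict):
    getattr(THIS_MODULE, 'do_' + argdict['command'])(argdict)

dispatch({'command': 'build', 'target': 'site'})  # prints: building site
|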
243,098
|
jpablo128/simplystatic
|
bin/s2.py
|
do_init
|
def do_init(argdict):
'''Create the structure of a s2site.'''
site = make_site_obj(argdict)
try:
site.init_structure()
print "Initialized directory."
if argdict['randomsite']:
#all_tags = ['tag1','tag2','tag3','tag4']
for i in range(1,argdict['numpages']+1):
#ptags = random.sample(all_tags,random.randint(1,len(all_tags)))
p = site.random_page()
p.set_published()
p.write()
print "added page ",p.slug
except ValueError: # pragma: no cover
print "Cannot create structure. You're already within an s2 \
tree, or the directory is not empty or it is not writeable. "
|
python
|
def do_init(argdict):
'''Create the structure of a s2site.'''
site = make_site_obj(argdict)
try:
site.init_structure()
print "Initialized directory."
if argdict['randomsite']:
#all_tags = ['tag1','tag2','tag3','tag4']
for i in range(1,argdict['numpages']+1):
#ptags = random.sample(all_tags,random.randint(1,len(all_tags)))
p = site.random_page()
p.set_published()
p.write()
print "added page ",p.slug
except ValueError: # pragma: no cover
print "Cannot create structure. You're already within an s2 \
tree, or the directory is not empty or it is not writeable. "
|
[
"def",
"do_init",
"(",
"argdict",
")",
":",
"site",
"=",
"make_site_obj",
"(",
"argdict",
")",
"try",
":",
"site",
".",
"init_structure",
"(",
")",
"print",
"\"Initialized directory.\"",
"if",
"argdict",
"[",
"'randomsite'",
"]",
":",
"#all_tags = ['tag1','tag2','tag3','tag4']",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"argdict",
"[",
"'numpages'",
"]",
"+",
"1",
")",
":",
"#ptags = random.sample(all_tags,random.randint(1,len(all_tags)))",
"p",
"=",
"site",
".",
"random_page",
"(",
")",
"p",
".",
"set_published",
"(",
")",
"p",
".",
"write",
"(",
")",
"print",
"\"added page \"",
",",
"p",
".",
"slug",
"except",
"ValueError",
":",
"# pragma: no cover",
"print",
"\"Cannot create structure. You're already within an s2 \\\ntree, or the directory is not empty or it is not writeable. \""
] |
Create the structure of a s2site.
|
[
"Create",
"the",
"structure",
"of",
"a",
"s2site",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/bin/s2.py#L122-L138
|
243,099
|
jpablo128/simplystatic
|
bin/s2.py
|
do_add
|
def do_add(argdict):
'''Add a new page to the site.'''
site = make_site_obj(argdict)
if not site.tree_ready:
print "Cannot add page. You are not within a simplystatic \
tree and you didn't specify a directory."
sys.exit()
title = argdict['title']
try:
new_page = site.add_page(title)
new_page.write()
print "Added page '"+ title + "'"
except ValueError as e: # pragma: no cover
print "Attempted to create a page which already exists."
sys.exit()
|
python
|
def do_add(argdict):
'''Add a new page to the site.'''
site = make_site_obj(argdict)
if not site.tree_ready:
print "Cannot add page. You are not within a simplystatic \
tree and you didn't specify a directory."
sys.exit()
title = argdict['title']
try:
new_page = site.add_page(title)
new_page.write()
print "Added page '"+ title + "'"
except ValueError as e: # pragma: no cover
print "Attempted to create a page which already exists."
sys.exit()
|
[
"def",
"do_add",
"(",
"argdict",
")",
":",
"site",
"=",
"make_site_obj",
"(",
"argdict",
")",
"if",
"not",
"site",
".",
"tree_ready",
":",
"print",
"\"Cannot add page. You are not within a simplystatic \\\ntree and you didn't specify a directory.\"",
"sys",
".",
"exit",
"(",
")",
"title",
"=",
"argdict",
"[",
"'title'",
"]",
"try",
":",
"new_page",
"=",
"site",
".",
"add_page",
"(",
"title",
")",
"new_page",
".",
"write",
"(",
")",
"print",
"\"Added page '\"",
"+",
"title",
"+",
"\"'\"",
"except",
"ValueError",
"as",
"e",
":",
"# pragma: no cover",
"print",
"\"Attempted to create a page which already exists.\"",
"sys",
".",
"exit",
"(",
")"
] |
Add a new page to the site.
|
[
"Add",
"a",
"new",
"page",
"to",
"the",
"site",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/bin/s2.py#L140-L154
|