| id (int32, 0-252k) | repo (string, 7-55) | path (string, 4-127) | func_name (string, 1-88) | original_string (string, 75-19.8k) | language (1 value) | code (string, 75-19.8k) | code_tokens (list) | docstring (string, 3-17.3k) | docstring_tokens (list) | sha (string, 40) | url (string, 87-242) |
|---|---|---|---|---|---|---|---|---|---|---|---|
240,500
|
inveniosoftware-contrib/record-recommender
|
record_recommender/profiles.py
|
Profiles.create_profiles
|
def create_profiles(self, prefix, weeks, ip_user=False):
"""Create the user profiles for the given weeks."""
# Future: Add a time range in weeks for how long a user is considered
# as the same user.
# Count accessed records
record_counter = {}
for year, week in weeks:
file = self.storage.get(prefix, year, week)
self.count_records(record_counter, file)
# TODO: Statistics, count records
print("Records read all: {}".format(self.stat))
# Filter out records with too few or too many views.
records_valid = self.filter_counter(record_counter)
# Create user profiles
profiles = defaultdict(list)
for year, week in weeks:
file = self.storage.get(prefix, year, week)
self._create_user_profiles(profiles, file, records_valid, ip_user,
year, week)
return profiles
|
python
|
def create_profiles(self, prefix, weeks, ip_user=False):
"""Create the user profiles for the given weeks."""
# Future: Add a time range in weeks for how long a user is considered
# as the same user.
# Count accessed records
record_counter = {}
for year, week in weeks:
file = self.storage.get(prefix, year, week)
self.count_records(record_counter, file)
# TODO: Statistics, count records
print("Records read all: {}".format(self.stat))
# Filter out records with too few or too many views.
records_valid = self.filter_counter(record_counter)
# Create user profiles
profiles = defaultdict(list)
for year, week in weeks:
file = self.storage.get(prefix, year, week)
self._create_user_profiles(profiles, file, records_valid, ip_user,
year, week)
return profiles
|
[
"def",
"create_profiles",
"(",
"self",
",",
"prefix",
",",
"weeks",
",",
"ip_user",
"=",
"False",
")",
":",
"# Future: Add a time range in weeks for how long a user is considered",
"# as the same user.",
"# Count accessed records",
"record_counter",
"=",
"{",
"}",
"for",
"year",
",",
"week",
"in",
"weeks",
":",
"file",
"=",
"self",
".",
"storage",
".",
"get",
"(",
"prefix",
",",
"year",
",",
"week",
")",
"self",
".",
"count_records",
"(",
"record_counter",
",",
"file",
")",
"# TODO: Statistics, count records",
"print",
"(",
"\"Records read all: {}\"",
".",
"format",
"(",
"self",
".",
"stat",
")",
")",
"# Filter records with to less/much views.",
"records_valid",
"=",
"self",
".",
"filter_counter",
"(",
"record_counter",
")",
"# Create user profiles",
"profiles",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"year",
",",
"week",
"in",
"weeks",
":",
"file",
"=",
"self",
".",
"storage",
".",
"get",
"(",
"prefix",
",",
"year",
",",
"week",
")",
"self",
".",
"_create_user_profiles",
"(",
"profiles",
",",
"file",
",",
"records_valid",
",",
"ip_user",
",",
"year",
",",
"week",
")",
"return",
"profiles"
] |
Create the user profiles for the given weeks.
|
[
"Create",
"the",
"user",
"profiles",
"for",
"the",
"given",
"weeks",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L103-L127
|
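A minimal, self-contained sketch of the accumulation pattern `create_profiles` relies on: `defaultdict(list)` lets per-user record lists grow without key checks. The event data below is a stand-in; the `Profiles` class and its storage backend are not reproduced from the repo.

```python
from collections import defaultdict

# Stand-in for the (user_id, record_id) events a weekly log file would yield;
# in the function above they come from self.storage.get(prefix, year, week).
events = [("u1", 10), ("u1", 12), ("u2", 10), ("u1", 10)]

profiles = defaultdict(list)   # unseen users start with an empty list
for uid, recid in events:
    profiles[uid].append(recid)

print(dict(profiles))          # {'u1': [10, 12, 10], 'u2': [10]}
```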
240,501
|
inveniosoftware-contrib/record-recommender
|
record_recommender/profiles.py
|
Profiles.count_records
|
def count_records(self, record_counter, file):
"""Count the number of viewed records."""
counter = record_counter
events_counter = 0
for record in file.get_records():
recid = record[2]
counter[recid] = counter.get(recid, 0) + 1
events_counter += 1
self.stat['user_record_events'] = events_counter
return counter
|
python
|
def count_records(self, record_counter, file):
"""Count the number of viewed records."""
counter = record_counter
events_counter = 0
for record in file.get_records():
recid = record[2]
counter[recid] = counter.get(recid, 0) + 1
events_counter += 1
self.stat['user_record_events'] = events_counter
return counter
|
[
"def",
"count_records",
"(",
"self",
",",
"record_counter",
",",
"file",
")",
":",
"counter",
"=",
"record_counter",
"events_counter",
"=",
"0",
"for",
"record",
"in",
"file",
".",
"get_records",
"(",
")",
":",
"recid",
"=",
"record",
"[",
"2",
"]",
"counter",
"[",
"recid",
"]",
"=",
"counter",
".",
"get",
"(",
"recid",
",",
"0",
")",
"+",
"1",
"events_counter",
"+=",
"1",
"self",
".",
"stat",
"[",
"'user_record_events'",
"]",
"=",
"events_counter",
"return",
"counter"
] |
Count the number of viewed records.
|
[
"Count",
"the",
"number",
"of",
"viewed",
"records",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L129-L139
|
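The tally loop in `count_records` is the classic `dict.get(key, 0) + 1` idiom; `collections.Counter` gives the same result (shown here only as a stdlib cross-check, not what the source uses):

```python
from collections import Counter

record_ids = [2, 5, 2, 9, 2]              # stand-in for record[2] of each event
counter = Counter(record_ids)             # same tallies as counter.get(recid, 0) + 1
events_counter = sum(counter.values())    # what the function stores in self.stat
print(counter[2], events_counter)         # 3 5
```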
240,502
|
inveniosoftware-contrib/record-recommender
|
record_recommender/profiles.py
|
Profiles.filter_counter
|
def filter_counter(self, counter, min=2, max=100000000):
"""
Filter the counted records.
Returns: Dict mapping record ids to their view counts.
"""
records_filtered = {}
counter_all_records = 0
for item in counter:
counter_all_records += 1
if max > counter[item] >= min:
records_filtered[item] = counter[item]
self.stat['user_record_events'] = counter_all_records
self.stat['records_filtered'] = len(records_filtered)
return records_filtered
|
python
|
def filter_counter(self, counter, min=2, max=100000000):
"""
Filter the counted records.
Returns: Dict mapping record ids to their view counts.
"""
records_filtered = {}
counter_all_records = 0
for item in counter:
counter_all_records += 1
if max > counter[item] >= min:
records_filtered[item] = counter[item]
self.stat['user_record_events'] = counter_all_records
self.stat['records_filtered'] = len(records_filtered)
return records_filtered
|
[
"def",
"filter_counter",
"(",
"self",
",",
"counter",
",",
"min",
"=",
"2",
",",
"max",
"=",
"100000000",
")",
":",
"records_filterd",
"=",
"{",
"}",
"counter_all_records",
"=",
"0",
"for",
"item",
"in",
"counter",
":",
"counter_all_records",
"+=",
"1",
"if",
"max",
">",
"counter",
"[",
"item",
"]",
">=",
"min",
":",
"records_filterd",
"[",
"item",
"]",
"=",
"counter",
"[",
"item",
"]",
"self",
".",
"stat",
"[",
"'user_record_events'",
"]",
"=",
"counter_all_records",
"self",
".",
"stat",
"[",
"'records_filtered'",
"]",
"=",
"len",
"(",
"records_filterd",
")",
"return",
"records_filterd"
] |
Filter the counted records.
Returns: Dict mapping record ids to their view counts.
|
[
"Filter",
"the",
"counted",
"records",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L141-L156
|
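`filter_counter` keeps records whose view count lies in the half-open interval `[min, max)`; the same filter as a dict comprehension, with made-up counts:

```python
counts = {"rec1": 1, "rec2": 7, "rec3": 250}
lo, hi = 2, 100   # analogous to the min/max defaults, scaled down for the example

valid = {rec: n for rec, n in counts.items() if hi > n >= lo}
print(valid)      # {'rec2': 7} -- rec1 has too few views, rec3 too many
```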
240,503
|
inveniosoftware-contrib/record-recommender
|
record_recommender/profiles.py
|
Profiles._create_user_profiles
|
def _create_user_profiles(self, profiles, file, valid_records,
ip_user=False, year=None, week=None):
"""
Create user profiles with all the records visited or downloaded.
Returns: Dictionary with the user id and a record list.
{'2323': [1, 2, 4]}
"""
for record in file.get_records():
recid = record[2]
if not valid_records.get(recid, None):
# Record not valid
continue
if ip_user:
ip = record[4]
user_agent = record[5]
# Generate unique user id
user_id = "{0}-{1}_{2}_{3}".format(year, week, ip, user_agent)
try:
uid = hashlib.md5(user_id.encode('utf-8')).hexdigest()
except UnicodeDecodeError:
logger.info("UnicodeDecodeError {}".format(user_id))
else:
uid = record[1]
profiles[uid].append(recid)
return profiles
|
python
|
def _create_user_profiles(self, profiles, file, valid_records,
ip_user=False, year=None, week=None):
"""
Create user profiles with all the records visited or downloaded.
Returns: Dictionary with the user id and a record list.
{'2323': [1, 2, 4]}
"""
for record in file.get_records():
recid = record[2]
if not valid_records.get(recid, None):
# Record not valid
continue
if ip_user:
ip = record[4]
user_agent = record[5]
# Generate unique user id
user_id = "{0}-{1}_{2}_{3}".format(year, week, ip, user_agent)
try:
uid = hashlib.md5(user_id.encode('utf-8')).hexdigest()
except UnicodeDecodeError:
logger.info("UnicodeDecodeError {}".format(user_id))
else:
uid = record[1]
profiles[uid].append(recid)
return profiles
|
[
"def",
"_create_user_profiles",
"(",
"self",
",",
"profiles",
",",
"file",
",",
"valid_records",
",",
"ip_user",
"=",
"False",
",",
"year",
"=",
"None",
",",
"week",
"=",
"None",
")",
":",
"for",
"record",
"in",
"file",
".",
"get_records",
"(",
")",
":",
"recid",
"=",
"record",
"[",
"2",
"]",
"if",
"not",
"valid_records",
".",
"get",
"(",
"recid",
",",
"None",
")",
":",
"# Record not valid",
"continue",
"if",
"ip_user",
":",
"ip",
"=",
"record",
"[",
"4",
"]",
"user_agent",
"=",
"record",
"[",
"5",
"]",
"# Generate unique user id",
"user_id",
"=",
"\"{0}-{1}_{2}_{3}\"",
".",
"format",
"(",
"year",
",",
"week",
",",
"ip",
",",
"user_agent",
")",
"try",
":",
"uid",
"=",
"hashlib",
".",
"md5",
"(",
"user_id",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"except",
"UnicodeDecodeError",
":",
"logger",
".",
"info",
"(",
"\"UnicodeDecodeError {}\"",
".",
"format",
"(",
"user_id",
")",
")",
"else",
":",
"uid",
"=",
"record",
"[",
"1",
"]",
"profiles",
"[",
"uid",
"]",
".",
"append",
"(",
"recid",
")",
"return",
"profiles"
] |
Create user profiles with all the records visited or downloaded.
Returns: Dictionary with the user id and a record list.
{'2323': [1, 2, 4]}
|
[
"Create",
"user",
"profiles",
"with",
"all",
"the",
"records",
"visited",
"or",
"downloaded",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L158-L186
|
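When `ip_user` is set, the function above pseudonymises visitors by hashing a `(year, week, ip, user_agent)` composite key, so the same visitor gets a fresh identity every week. That step in isolation (field values are invented; the indices follow `record[4]`/`record[5]` in the source):

```python
import hashlib

year, week = 2016, 7
ip, user_agent = "203.0.113.5", "Mozilla/5.0"   # record[4] and record[5]

user_id = "{0}-{1}_{2}_{3}".format(year, week, ip, user_agent)
uid = hashlib.md5(user_id.encode("utf-8")).hexdigest()
print(uid)   # stable 32-char hex pseudonym for this week/ip/agent combination
```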
240,504
|
noobermin/lspreader
|
bin/nearest.py
|
handle_dims
|
def handle_dims(opts):
'''
Script option handling.
'''
use,res = [],[];
if opts['--X']:
use.append('x');
res.append(int(opts['--xres']));
if opts['--Y']:
use.append('y');
res.append(int(opts['--yres']));
if opts['--Z']:
use.append('z');
res.append(int(opts['--zres']));
if use == []:
use = ['x','y','z'];
res = list(map(lambda k: int(opts[k]),['--xres','--yres','--zres']));  # list() so the Python 3 map iterator matches the list branches above
# A couple of things to note; written in this way, whatever
# this list (and thus, what is read) becomes, it is ordered
# alphabetically. This is important, as this determines what
# each resulting row and column and breadth in the output
# array corresponds to from the actual simulation.
#
# It is probably worth mentioning that the xz in simulation
# axes will be [0,1] in numpy axes, that is, it will be left-handed.
# Using xz leads to this anyway, but it's worth reminding the reader.
# To permute in 2D, use the --permute flag.
return use,res;
|
python
|
def handle_dims(opts):
'''
Script option handling.
'''
use,res = [],[];
if opts['--X']:
use.append('x');
res.append(int(opts['--xres']));
if opts['--Y']:
use.append('y');
res.append(int(opts['--yres']));
if opts['--Z']:
use.append('z');
res.append(int(opts['--zres']));
if use == []:
use = ['x','y','z'];
res = list(map(lambda k: int(opts[k]),['--xres','--yres','--zres']));  # list() so the Python 3 map iterator matches the list branches above
# A couple of things to note; written in this way, whatever
# this list (and thus, what is read) becomes, it is ordered
# alphabetically. This is important, as this determines what
# each resulting row and column and breadth in the output
# array corresponds to from the actual simulation.
#
# It is probably worth mentioning that the xz in simulation
# axes will be [0,1] in numpy axes, that is, it will be left-handed.
# Using xz leads to this anyway, but it's worth reminding the reader.
# To permute in 2D, use the --permute flag.
return use,res;
|
[
"def",
"handle_dims",
"(",
"opts",
")",
":",
"use",
",",
"res",
"=",
"[",
"]",
",",
"[",
"]",
"if",
"opts",
"[",
"'--X'",
"]",
":",
"use",
".",
"append",
"(",
"'x'",
")",
"res",
".",
"append",
"(",
"int",
"(",
"opts",
"[",
"'--xres'",
"]",
")",
")",
"if",
"opts",
"[",
"'--Y'",
"]",
":",
"use",
".",
"append",
"(",
"'y'",
")",
"res",
".",
"append",
"(",
"int",
"(",
"opts",
"[",
"'--yres'",
"]",
")",
")",
"if",
"opts",
"[",
"'--Z'",
"]",
":",
"use",
".",
"append",
"(",
"'z'",
")",
"res",
".",
"append",
"(",
"int",
"(",
"opts",
"[",
"'--zres'",
"]",
")",
")",
"if",
"use",
"==",
"[",
"]",
":",
"use",
"=",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"res",
"=",
"map",
"(",
"lambda",
"k",
":",
"int",
"(",
"opts",
"[",
"k",
"]",
")",
",",
"[",
"'--xres'",
",",
"'--yres'",
",",
"'--zres'",
"]",
")",
"# A couple of things to note; written in this way, whatever",
"# this list (and thus, what is read) becomes, it is ordered",
"# alphabetically. This is important, as this determines what",
"# each resulting row and column and breadth in the output",
"# array corresponds to from the actual simulation.",
"#",
"# It is probably worth mentioning that the xz in simulation",
"# axes will be [0,1] in numpy axes, that is, it will be left-handed.",
"# Using xz leads to this anyway, but it's worth reminding the reader.",
"# To permute in 2D, use the --permute flag.",
"return",
"use",
",",
"res"
] |
Script option handling.
|
[
"Script",
"option",
"handling",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/bin/nearest.py#L40-L67
|
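One portability note on the fallback branch: Python 2's `map()` returned a list while Python 3's returns a lazy iterator, which is why the `list(...)` wrapper added above keeps `res` consistent with the `res.append(...)` branches. A quick check with an invented docopt-style `opts` dict:

```python
opts = {"--xres": "100", "--yres": "200", "--zres": "300"}   # hypothetical values

res = list(map(lambda k: int(opts[k]), ["--xres", "--yres", "--zres"]))
print(res)      # [100, 200, 300] -- a real list on both Python 2 and 3
print(res[1])   # indexing works; a bare Python 3 map object would raise TypeError
```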
240,505
|
jpscaletti/moar
|
moar/optimage.py
|
_temporary_filenames
|
def _temporary_filenames(total):
"""Context manager to create temporary files and remove them after use."""
temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
yield temp_files
for temp_file in temp_files:
try:
os.remove(temp_file)
except OSError:
# Continue in case we could not remove the file. One reason is that
# the file was never created.
pass
|
python
|
def _temporary_filenames(total):
"""Context manager to create temporary files and remove them after use."""
temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
yield temp_files
for temp_file in temp_files:
try:
os.remove(temp_file)
except OSError:
# Continue in case we could not remove the file. One reason is that
# the file was never created.
pass
|
[
"def",
"_temporary_filenames",
"(",
"total",
")",
":",
"temp_files",
"=",
"[",
"_get_temporary_filename",
"(",
"'optimage-'",
")",
"for",
"i",
"in",
"range",
"(",
"total",
")",
"]",
"yield",
"temp_files",
"for",
"temp_file",
"in",
"temp_files",
":",
"try",
":",
"os",
".",
"remove",
"(",
"temp_file",
")",
"except",
"OSError",
":",
"# Continue in case we could not remove the file. One reason is that",
"# the fail was never created.",
"pass"
] |
Context manager to create temporary files and remove them after use.
|
[
"Context",
"manager",
"to",
"create",
"temporary",
"files",
"and",
"remove",
"them",
"after",
"use",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/optimage.py#L67-L77
|
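`_temporary_filenames` is a generator, so in the source it is presumably decorated with `@contextlib.contextmanager` (the extracted span starts at the `def` line, which would drop any decorator). A self-contained sketch of the same create-yield-cleanup pattern, using only the standard library:

```python
import contextlib
import os
import tempfile

@contextlib.contextmanager
def temporary_filenames(total):
    """Yield `total` temp file paths and remove them afterwards."""
    paths = []
    for _ in range(total):
        fd, path = tempfile.mkstemp(prefix="optimage-")
        os.close(fd)            # mkstemp returns an open descriptor; close it now
        paths.append(path)
    try:
        yield paths
    finally:
        for path in paths:
            try:
                os.remove(path)
            except OSError:
                pass            # e.g. the file was already moved or never created

with temporary_filenames(2) as names:
    print(names)                # two throwaway paths, deleted after the block
```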
240,506
|
jpscaletti/moar
|
moar/optimage.py
|
_process
|
def _process(compressor, input_filename, output_filename):
"""Helper function to compress an image.
Returns:
_CompressorResult named tuple, with the resulting size, the name of the
output file and the name of the compressor.
"""
compressor(input_filename, output_filename)
result_size = os.path.getsize(output_filename)
return _CompressorResult(result_size, output_filename, compressor.__name__)
|
python
|
def _process(compressor, input_filename, output_filename):
"""Helper function to compress an image.
Returns:
_CompressorResult named tuple, with the resulting size, the name of the
output file and the name of the compressor.
"""
compressor(input_filename, output_filename)
result_size = os.path.getsize(output_filename)
return _CompressorResult(result_size, output_filename, compressor.__name__)
|
[
"def",
"_process",
"(",
"compressor",
",",
"input_filename",
",",
"output_filename",
")",
":",
"compressor",
"(",
"input_filename",
",",
"output_filename",
")",
"result_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"output_filename",
")",
"return",
"_CompressorResult",
"(",
"result_size",
",",
"output_filename",
",",
"compressor",
".",
"__name__",
")"
] |
Helper function to compress an image.
Returns:
_CompressorResult named tuple, with the resulting size, the name of the
output file and the name of the compressor.
|
[
"Helper",
"function",
"to",
"compress",
"an",
"image",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/optimage.py#L127-L137
|
240,507
|
jpscaletti/moar
|
moar/optimage.py
|
_compress_with
|
def _compress_with(input_filename, output_filename, compressors):
"""Helper function to compress an image with several compressors.
In case the compressors do not improve the filesize or in case the resulting
image is not equivalent to the source, then the output will be a copy of the
input.
"""
with _temporary_filenames(len(compressors)) as temp_filenames:
results = []
for compressor, temp_filename in zip(compressors, temp_filenames):
results.append(_process(compressor, input_filename, temp_filename))
best_result = min(results)
os.rename(best_result.filename, output_filename)
best_compressor = best_result.compressor
if best_result.size >= os.path.getsize(input_filename):
best_compressor = None
if (best_compressor is not None and
not _images_are_equal(input_filename, output_filename)):
logging.info('Compressor "%s" generated an invalid image for "%s"',
best_compressor, input_filename)
best_compressor = None
if best_compressor is None:
shutil.copy(input_filename, output_filename)
logging.info('%s: best compressor for "%s"', best_compressor,
input_filename)
|
python
|
def _compress_with(input_filename, output_filename, compressors):
"""Helper function to compress an image with several compressors.
In case the compressors do not improve the filesize or in case the resulting
image is not equivalent to the source, then the output will be a copy of the
input.
"""
with _temporary_filenames(len(compressors)) as temp_filenames:
results = []
for compressor, temp_filename in zip(compressors, temp_filenames):
results.append(_process(compressor, input_filename, temp_filename))
best_result = min(results)
os.rename(best_result.filename, output_filename)
best_compressor = best_result.compressor
if best_result.size >= os.path.getsize(input_filename):
best_compressor = None
if (best_compressor is not None and
not _images_are_equal(input_filename, output_filename)):
logging.info('Compressor "%s" generated an invalid image for "%s"',
best_compressor, input_filename)
best_compressor = None
if best_compressor is None:
shutil.copy(input_filename, output_filename)
logging.info('%s: best compressor for "%s"', best_compressor,
input_filename)
|
[
"def",
"_compress_with",
"(",
"input_filename",
",",
"output_filename",
",",
"compressors",
")",
":",
"with",
"_temporary_filenames",
"(",
"len",
"(",
"compressors",
")",
")",
"as",
"temp_filenames",
":",
"results",
"=",
"[",
"]",
"for",
"compressor",
",",
"temp_filename",
"in",
"zip",
"(",
"compressors",
",",
"temp_filenames",
")",
":",
"results",
".",
"append",
"(",
"_process",
"(",
"compressor",
",",
"input_filename",
",",
"temp_filename",
")",
")",
"best_result",
"=",
"min",
"(",
"results",
")",
"os",
".",
"rename",
"(",
"best_result",
".",
"filename",
",",
"output_filename",
")",
"best_compressor",
"=",
"best_result",
".",
"compressor",
"if",
"best_result",
".",
"size",
">=",
"os",
".",
"path",
".",
"getsize",
"(",
"input_filename",
")",
":",
"best_compressor",
"=",
"None",
"if",
"(",
"best_compressor",
"is",
"not",
"None",
"and",
"not",
"_images_are_equal",
"(",
"input_filename",
",",
"output_filename",
")",
")",
":",
"logging",
".",
"info",
"(",
"'Compressor \"%s\" generated an invalid image for \"%s\"'",
",",
"best_compressor",
",",
"input_filename",
")",
"best_compressor",
"=",
"None",
"if",
"best_compressor",
"is",
"None",
":",
"shutil",
".",
"copy",
"(",
"input_filename",
",",
"output_filename",
")",
"logging",
".",
"info",
"(",
"'%s: best compressor for \"%s\"'",
",",
"best_compressor",
",",
"input_filename",
")"
] |
Helper function to compress an image with several compressors.
In case the compressors do not improve the filesize or in case the resulting
image is not equivalent to the source, then the output will be a copy of the
input.
|
[
"Helper",
"function",
"to",
"compress",
"an",
"image",
"with",
"several",
"compressors",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/optimage.py#L140-L168
|
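`min(results)` above picks the smallest output because `_CompressorResult` (built in `_process`, row 240,506) is presumably a `namedtuple` whose first field is the size, and tuples compare field by field. A tiny demonstration under that assumption (the compressor names are invented):

```python
from collections import namedtuple

# Field order inferred from _process(): size first, then filename, then name.
_CompressorResult = namedtuple("_CompressorResult", "size filename compressor")

results = [
    _CompressorResult(5120, "/tmp/a.png", "compressor_a"),
    _CompressorResult(4096, "/tmp/b.png", "compressor_b"),
]
print(min(results))   # the 4096-byte result wins: tuples compare size first
```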
240,508
|
jcalogovic/lightning
|
stormstats/storm.py
|
Storm.add_to_map
|
def add_to_map(map_obj, lat, lon, date_time, key, cluster_obj):
"""Add individual elements to a foilum map in a cluster object"""
text = "Event {0} at {1}".format(key, date_time.split()[1])
folium.Marker([lat, lon], popup=text).add_to(cluster_obj)
|
python
|
def add_to_map(map_obj, lat, lon, date_time, key, cluster_obj):
"""Add individual elements to a foilum map in a cluster object"""
text = "Event {0} at {1}".format(key, date_time.split()[1])
folium.Marker([lat, lon], popup=text).add_to(cluster_obj)
|
[
"def",
"add_to_map",
"(",
"map_obj",
",",
"lat",
",",
"lon",
",",
"date_time",
",",
"key",
",",
"cluster_obj",
")",
":",
"text",
"=",
"\"Event {0} at {1}\"",
".",
"format",
"(",
"key",
",",
"date_time",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"folium",
".",
"Marker",
"(",
"[",
"lat",
",",
"lon",
"]",
",",
"popup",
"=",
"text",
")",
".",
"add_to",
"(",
"cluster_obj",
")"
] |
Add individual elements to a folium map in a cluster object
|
[
"Add",
"individual",
"elements",
"to",
"a",
"foilum",
"map",
"in",
"a",
"cluster",
"object"
] |
f9e52731c9dd40cb302295ec36a444e0377d0570
|
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/storm.py#L72-L75
|
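For context, the `cluster_obj` argument is typically a `folium.plugins.MarkerCluster` already attached to `map_obj`; a hedged sketch of how the pieces fit together (coordinates and popup text are made up):

```python
import folium
from folium.plugins import MarkerCluster

m = folium.Map(location=[46.5, 7.5], zoom_start=7)
cluster = MarkerCluster().add_to(m)   # plays the role of cluster_obj above
folium.Marker([46.2, 7.3], popup="Event 1 at 12:00:00").add_to(cluster)
m.save("storm_map.html")              # writes a standalone interactive map
```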
240,509
|
maxweisspoker/simplebitcoinfuncs
|
simplebitcoinfuncs/signandverify.py
|
sign
|
def sign(hash,priv,k=0):
'''
Returns a DER-encoded signature from an input of a hash and private
key, and optionally a K value.
Hash and private key inputs must be 64-char hex strings,
k input is an int/long.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
>>> k = 4 # chosen by fair dice roll, guaranteed to be random
>>> sign(h,p,k)
'3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
'''
if k == 0:
k = generate_k(priv, hash)
hash = int(hash,16)
priv = int(priv,16)
r = int(privtopub(dechex(k,32),True)[2:],16) % N
s = ((hash + (r*priv)) * modinv(k,N)) % N
# High S value is non-standard (soon to be invalid)
if s > (N // 2):
s = N - s
r, s = inttoDER(r), inttoDER(s)
olen = dechex(len(r+s)//2,1)
return '30' + olen + r + s
|
python
|
def sign(hash,priv,k=0):
'''
Returns a DER-encoded signature from an input of a hash and private
key, and optionally a K value.
Hash and private key inputs must be 64-char hex strings,
k input is an int/long.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
>>> k = 4 # chosen by fair dice roll, guaranteed to be random
>>> sign(h,p,k)
'3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
'''
if k == 0:
k = generate_k(priv, hash)
hash = int(hash,16)
priv = int(priv,16)
r = int(privtopub(dechex(k,32),True)[2:],16) % N
s = ((hash + (r*priv)) * modinv(k,N)) % N
# High S value is non-standard (soon to be invalid)
if s > (N // 2):
s = N - s
r, s = inttoDER(r), inttoDER(s)
olen = dechex(len(r+s)//2,1)
return '30' + olen + r + s
|
[
"def",
"sign",
"(",
"hash",
",",
"priv",
",",
"k",
"=",
"0",
")",
":",
"if",
"k",
"==",
"0",
":",
"k",
"=",
"generate_k",
"(",
"priv",
",",
"hash",
")",
"hash",
"=",
"int",
"(",
"hash",
",",
"16",
")",
"priv",
"=",
"int",
"(",
"priv",
",",
"16",
")",
"r",
"=",
"int",
"(",
"privtopub",
"(",
"dechex",
"(",
"k",
",",
"32",
")",
",",
"True",
")",
"[",
"2",
":",
"]",
",",
"16",
")",
"%",
"N",
"s",
"=",
"(",
"(",
"hash",
"+",
"(",
"r",
"*",
"priv",
")",
")",
"*",
"modinv",
"(",
"k",
",",
"N",
")",
")",
"%",
"N",
"# High S value is non-standard (soon to be invalid)",
"if",
"s",
">",
"(",
"N",
"/",
"2",
")",
":",
"s",
"=",
"N",
"-",
"s",
"r",
",",
"s",
"=",
"inttoDER",
"(",
"r",
")",
",",
"inttoDER",
"(",
"s",
")",
"olen",
"=",
"dechex",
"(",
"len",
"(",
"r",
"+",
"s",
")",
"//",
"2",
",",
"1",
")",
"return",
"'30'",
"+",
"olen",
"+",
"r",
"+",
"s"
] |
Returns a DER-encoded signature from an input of a hash and private
key, and optionally a K value.
Hash and private key inputs must be 64-char hex strings,
k input is an int/long.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
>>> k = 4 # chosen by fair dice roll, guaranteed to be random
>>> sign(h,p,k)
'3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
|
[
"Returns",
"a",
"DER",
"-",
"encoded",
"signature",
"from",
"a",
"input",
"of",
"a",
"hash",
"and",
"private",
"key",
"and",
"optionally",
"a",
"K",
"value",
"."
] |
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
|
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/signandverify.py#L36-L66
|
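The low-S step in `sign` (`if s > (N // 2): s = N - s`) works because for any valid ECDSA signature over secp256k1, `(r, s)` and `(r, N - s)` both verify; canonicalising to the low half removes that malleability. A standalone check, where `N` is the well-known secp256k1 group order rather than anything taken from this file:

```python
# secp256k1 group order (a published curve constant)
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

def normalize_s(s):
    """Fold a high S value into the low half, as sign() does."""
    return N - s if s > N // 2 else s

assert normalize_s(N - 5) == 5   # artificially high S gets folded
assert normalize_s(5) == 5       # already-low values pass through unchanged
```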
240,510
|
maxweisspoker/simplebitcoinfuncs
|
simplebitcoinfuncs/signandverify.py
|
verify
|
def verify(hash,sig,pub,exceptonhighS=False):
'''
Verify a DER-encoded signature against a given hash and public key
No checking of format is done in this function, so the signature
format (and other inputs) should be verified as being the correct
format prior to using this method.
Hash is just a 64-char hex string
Public key format can be verified with validatepubkey() which is
found in .bitcoin
Signature format can be validated with checksigformat() which is
the next function after this
'exceptonhighS' is available because many Bitcoin implementations
will soon be invalidating high S values in signatures, in order
to reduce transaction malleability issues. I decided an exception
was preferable to returning False, so as to be distinct from a bad
signature.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> sig = '3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
>>> pub = '022587327dabe23ee608d8504d8bc3a341397db1c577370389f94ccd96bb59a077'
>>> verify(h,sig,pub)
True
>>> sig = '3046022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13022100a671c81d199d8810b2f350f1cd2f6a1fff7268a495f813682b18ea0e7bafde8a'
>>> verify(h,sig,pub)
True
>>> verify(h,sig,uncompress(pub))
True
>>> verify(h,sig,pub,True)
Traceback (most recent call last):
...
TypeError: High S value.
'''
rlen = 2*int(sig[6:8],16)
r = int(sig[8:8+(rlen)],16)
s = int(sig[(12+rlen):],16) # Ignoring s-len; format dictates it
# will be to the end of string
assert r < N
if exceptonhighS:
if s > (N // 2):
raise TypeError("High S value.")
w = modinv(s,N)
x = int(addpubs(
privtopub(dechex((int(hash,16) * w) % N,32),False),
multiplypub(pub,dechex((r*w) % N,32),False),
False)[2:66],16)
return x==r
|
python
|
def verify(hash,sig,pub,exceptonhighS=False):
'''
Verify a DER-encoded signature against a given hash and public key
No checking of format is done in this function, so the signature
format (and other inputs) should be verified as being the correct
format prior to using this method.
Hash is just a 64-char hex string
Public key format can be verified with validatepubkey() which is
found in .bitcoin
Signature format can be validated with checksigformat() which is
the next function after this
'exceptonhighS' is available because many Bitcoin implementations
will soon be invalidating high S values in signatures, in order
to reduce transaction malleability issues. I decided an exception
was preferable to returning False, so as to be distinct from a bad
signature.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> sig = '3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
>>> pub = '022587327dabe23ee608d8504d8bc3a341397db1c577370389f94ccd96bb59a077'
>>> verify(h,sig,pub)
True
>>> sig = '3046022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13022100a671c81d199d8810b2f350f1cd2f6a1fff7268a495f813682b18ea0e7bafde8a'
>>> verify(h,sig,pub)
True
>>> verify(h,sig,uncompress(pub))
True
>>> verify(h,sig,pub,True)
Traceback (most recent call last):
...
TypeError: High S value.
'''
rlen = 2*int(sig[6:8],16)
r = int(sig[8:8+(rlen)],16)
s = int(sig[(12+rlen):],16) # Ignoring s-len; format dictates it
# will be to the end of string
assert r < N
if exceptonhighS:
if s > (N // 2):
raise TypeError("High S value.")
w = modinv(s,N)
x = int(addpubs(
privtopub(dechex((int(hash,16) * w) % N,32),False),
multiplypub(pub,dechex((r*w) % N,32),False),
False)[2:66],16)
return x==r
|
[
"def",
"verify",
"(",
"hash",
",",
"sig",
",",
"pub",
",",
"exceptonhighS",
"=",
"False",
")",
":",
"rlen",
"=",
"2",
"*",
"int",
"(",
"sig",
"[",
"6",
":",
"8",
"]",
",",
"16",
")",
"r",
"=",
"int",
"(",
"sig",
"[",
"8",
":",
"8",
"+",
"(",
"rlen",
")",
"]",
",",
"16",
")",
"s",
"=",
"int",
"(",
"sig",
"[",
"(",
"12",
"+",
"rlen",
")",
":",
"]",
",",
"16",
")",
"# Ignoring s-len; format dictates it",
"# will be to the end of string",
"assert",
"r",
"<",
"N",
"if",
"exceptonhighS",
":",
"if",
"s",
">",
"(",
"N",
"/",
"2",
")",
":",
"raise",
"TypeError",
"(",
"\"High S value.\"",
")",
"w",
"=",
"modinv",
"(",
"s",
",",
"N",
")",
"x",
"=",
"int",
"(",
"addpubs",
"(",
"privtopub",
"(",
"dechex",
"(",
"(",
"int",
"(",
"hash",
",",
"16",
")",
"*",
"w",
")",
"%",
"N",
",",
"32",
")",
",",
"False",
")",
",",
"multiplypub",
"(",
"pub",
",",
"dechex",
"(",
"(",
"r",
"*",
"w",
")",
"%",
"N",
",",
"32",
")",
",",
"False",
")",
",",
"False",
")",
"[",
"2",
":",
"66",
"]",
",",
"16",
")",
"return",
"x",
"==",
"r"
] |
Verify a DER-encoded signature against a given hash and public key
No checking of format is done in this function, so the signature
format (and other inputs) should be verified as being the correct
format prior to using this method.
Hash is just a 64-char hex string
Public key format can be verified with validatepubkey() which is
found in .bitcoin
Signature format can be validated with checksigformat() which is
the next function after this
'exceptonhighS' is available because many Bitcoin implementations
will soon be invalidating high S values in signatures, in order
to reduce transaction malleability issues. I decided an exception
was preferable to returning False, so as to be distinct from a bad
signature.
>>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
>>> sig = '3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
>>> pub = '022587327dabe23ee608d8504d8bc3a341397db1c577370389f94ccd96bb59a077'
>>> verify(h,sig,pub)
True
>>> sig = '3046022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13022100a671c81d199d8810b2f350f1cd2f6a1fff7268a495f813682b18ea0e7bafde8a'
>>> verify(h,sig,pub)
True
>>> verify(h,sig,uncompress(pub))
True
>>> verify(h,sig,pub,True)
Traceback (most recent call last):
...
TypeError: High S value.
|
[
"Verify",
"a",
"DER",
"-",
"encoded",
"signature",
"against",
"a",
"given",
"hash",
"and",
"public",
"key"
] |
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
|
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/signandverify.py#L68-L119
|
240,511
|
maxweisspoker/simplebitcoinfuncs
|
simplebitcoinfuncs/signandverify.py
|
checksigformat
|
def checksigformat(a,invalidatehighS=False):
'''
Checks input to see if it's a correctly formatted DER Bitcoin
signature in hex string format.
Returns True/False. If it excepts, there's a different problem
unrelated to the signature...
This does NOT validate the signature in any way, it ONLY checks that
it is formatted properly.
If invalidatehighS is True, this function will return False on an
otherwise valid signature format if it has a high S value.
'''
try:
a = hexstrlify(unhexlify(a))
except:
return False
try:
rlen = 2*int(a[6:8],16)
slen = 2*int(a[(10+rlen):(12+rlen)],16)
r = a[8:8+(rlen)]
s1 = a[(12+rlen):]
s2 = a[(12+rlen):(12+rlen+slen)]
assert s1 == s2
s1 = int(s1,16)
assert s1 < N
assert a[:2] == '30'
assert len(a) == ((2*int(a[2:4],16)) + 4)
assert a[4:6] == '02'
assert a[(8+rlen):(10+rlen)] == '02'
if int(dechex(int(r,16))[:2],16) > 127:
assert r[:2] == '00'
assert r[2:4] != '00'
else:
assert r[:2] != '00'
if int(dechex(s1)[:2],16) > 127:
assert s2[:2] == '00'
assert s2[2:4] != '00'
else:
assert s2[:2] != '00'
assert len(r) < 67
assert len(s2) < 67
except AssertionError:
return False
except Exception as e:
raise Exception(str(e))
if invalidatehighS:
if s1 > (N // 2):
return False
return True
|
python
|
def checksigformat(a,invalidatehighS=False):
'''
Checks input to see if it's a correctly formatted DER Bitcoin
signature in hex string format.
Returns True/False. If it excepts, there's a different problem
unrelated to the signature...
This does NOT validate the signature in any way, it ONLY checks that
it is formatted properly.
If invalidatehighS is True, this function will return False on an
otherwise valid signature format if it has a high S value.
'''
try:
a = hexstrlify(unhexlify(a))
except:
return False
try:
rlen = 2*int(a[6:8],16)
slen = 2*int(a[(10+rlen):(12+rlen)],16)
r = a[8:8+(rlen)]
s1 = a[(12+rlen):]
s2 = a[(12+rlen):(12+rlen+slen)]
assert s1 == s2
s1 = int(s1,16)
assert s1 < N
assert a[:2] == '30'
assert len(a) == ((2*int(a[2:4],16)) + 4)
assert a[4:6] == '02'
assert a[(8+rlen):(10+rlen)] == '02'
if int(dechex(int(r,16))[:2],16) > 127:
assert r[:2] == '00'
assert r[2:4] != '00'
else:
assert r[:2] != '00'
if int(dechex(s1)[:2],16) > 127:
assert s2[:2] == '00'
assert s2[2:4] != '00'
else:
assert s2[:2] != '00'
assert len(r) < 67
assert len(s2) < 67
except AssertionError:
return False
except Exception as e:
raise Exception(str(e))
if invalidatehighS:
if s1 > (N // 2):
return False
return True
|
[
"def",
"checksigformat",
"(",
"a",
",",
"invalidatehighS",
"=",
"False",
")",
":",
"try",
":",
"a",
"=",
"hexstrlify",
"(",
"unhexlify",
"(",
"a",
")",
")",
"except",
":",
"return",
"False",
"try",
":",
"rlen",
"=",
"2",
"*",
"int",
"(",
"a",
"[",
"6",
":",
"8",
"]",
",",
"16",
")",
"slen",
"=",
"2",
"*",
"int",
"(",
"a",
"[",
"(",
"10",
"+",
"rlen",
")",
":",
"(",
"12",
"+",
"rlen",
")",
"]",
",",
"16",
")",
"r",
"=",
"a",
"[",
"8",
":",
"8",
"+",
"(",
"rlen",
")",
"]",
"s1",
"=",
"a",
"[",
"(",
"12",
"+",
"rlen",
")",
":",
"]",
"s2",
"=",
"a",
"[",
"(",
"12",
"+",
"rlen",
")",
":",
"(",
"12",
"+",
"rlen",
"+",
"slen",
")",
"]",
"assert",
"s1",
"==",
"s2",
"s1",
"=",
"int",
"(",
"s1",
",",
"16",
")",
"assert",
"s1",
"<",
"N",
"assert",
"a",
"[",
":",
"2",
"]",
"==",
"'30'",
"assert",
"len",
"(",
"a",
")",
"==",
"(",
"(",
"2",
"*",
"int",
"(",
"a",
"[",
"2",
":",
"4",
"]",
",",
"16",
")",
")",
"+",
"4",
")",
"assert",
"a",
"[",
"4",
":",
"6",
"]",
"==",
"'02'",
"assert",
"a",
"[",
"(",
"8",
"+",
"rlen",
")",
":",
"(",
"10",
"+",
"rlen",
")",
"]",
"==",
"'02'",
"if",
"int",
"(",
"dechex",
"(",
"int",
"(",
"r",
",",
"16",
")",
")",
"[",
":",
"2",
"]",
",",
"16",
")",
">",
"127",
":",
"assert",
"r",
"[",
":",
"2",
"]",
"==",
"'00'",
"assert",
"r",
"[",
"2",
":",
"4",
"]",
"!=",
"'00'",
"else",
":",
"assert",
"r",
"[",
":",
"2",
"]",
"!=",
"'00'",
"if",
"int",
"(",
"dechex",
"(",
"s1",
")",
"[",
":",
"2",
"]",
",",
"16",
")",
">",
"127",
":",
"assert",
"s2",
"[",
":",
"2",
"]",
"==",
"'00'",
"assert",
"s2",
"[",
"2",
":",
"4",
"]",
"!=",
"'00'",
"else",
":",
"assert",
"s2",
"[",
":",
"2",
"]",
"!=",
"'00'",
"assert",
"len",
"(",
"r",
")",
"<",
"67",
"assert",
"len",
"(",
"s2",
")",
"<",
"67",
"except",
"AssertionError",
":",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"str",
"(",
"e",
")",
")",
"if",
"invalidatehighS",
":",
"if",
"s1",
">",
"(",
"N",
"/",
"2",
")",
":",
"return",
"False",
"return",
"True"
] |
Checks input to see if it's a correctly formatted DER Bitcoin
signature in hex string format.
Returns True/False. If it excepts, there's a different problem
unrelated to the signature...
This does NOT validate the signature in any way, it ONLY checks that
it is formatted properly.
If invalidatehighS is True, this function will return False on an
otherwise valid signature format if it has a high S value.
|
[
"Checks",
"input",
"to",
"see",
"if",
"it",
"s",
"a",
"correctly",
"formatted",
"DER",
"Bitcoin",
"signature",
"in",
"hex",
"string",
"format",
"."
] |
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
|
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/signandverify.py#L122-L182
|
240,512
|
briancappello/flask-sqlalchemy-bundle
|
flask_sqlalchemy_bundle/decorators.py
|
param_converter
|
def param_converter(*decorator_args, **decorator_kwargs):
"""
Call with the url parameter names as keyword argument keys, their values
being the model to convert to.
Models will be looked up by the url param names. If a url param name
is prefixed with the snake-cased model name, the prefix will be stripped.
If a model isn't found, abort with a 404.
The action's argument names must match the snake-cased model names.
For example::
@bp.route('/users/<int:user_id>/posts/<int:id>')
@param_converter(user_id=User, id=Post)
def show_post(user, post):
# the param converter does the database lookups:
# user = User.query.filter_by(id=user_id).first()
# post = Post.query.filter_by(id=id).first()
# and calls the decorated action: show_post(user, post)
# or to customize the argument names passed to the action:
@bp.route('/users/<int:user_id>/posts/<int:post_id>')
@param_converter(user_id={'user_arg_name': User},
post_id={'post_arg_name': Post})
def show_post(user_arg_name, post_arg_name):
# do stuff ...
Also supports parsing arguments from the query string. For query string
keyword arguments, use a lookup (dict, Enum) or callable::
@bp.route('/users/<int:id>')
@param_converter(id=User, foo=str, optional=int)
def show_user(user, foo, optional=10):
# GET /users/1?foo=bar
# calls show_user(user=User.get(1), foo='bar')
"""
def wrapped(fn):
@wraps(fn)
def decorated(*view_args, **view_kwargs):
view_kwargs = _convert_models(view_kwargs, decorator_kwargs)
view_kwargs = _convert_query_params(view_kwargs, decorator_kwargs)
return fn(*view_args, **view_kwargs)
return decorated
if decorator_args and callable(decorator_args[0]):
return wrapped(decorator_args[0])
return wrapped
|
python
|
def param_converter(*decorator_args, **decorator_kwargs):
"""
Call with the url parameter names as keyword argument keys, their values
being the model to convert to.
Models will be looked up by the url param names. If a url param name
is prefixed with the snake-cased model name, the prefix will be stripped.
If a model isn't found, abort with a 404.
The action's argument names must match the snake-cased model names.
For example::
@bp.route('/users/<int:user_id>/posts/<int:id>')
@param_converter(user_id=User, id=Post)
def show_post(user, post):
# the param converter does the database lookups:
# user = User.query.filter_by(id=user_id).first()
# post = Post.query.filter_by(id=id).first()
# and calls the decorated action: show_post(user, post)
# or to customize the argument names passed to the action:
@bp.route('/users/<int:user_id>/posts/<int:post_id>')
@param_converter(user_id={'user_arg_name': User},
post_id={'post_arg_name': Post})
def show_post(user_arg_name, post_arg_name):
# do stuff ...
Also supports parsing arguments from the query string. For query string
keyword arguments, use a lookup (dict, Enum) or callable::
@bp.route('/users/<int:id>')
@param_converter(id=User, foo=str, optional=int)
def show_user(user, foo, optional=10):
# GET /users/1?foo=bar
# calls show_user(user=User.get(1), foo='bar')
"""
def wrapped(fn):
@wraps(fn)
def decorated(*view_args, **view_kwargs):
view_kwargs = _convert_models(view_kwargs, decorator_kwargs)
view_kwargs = _convert_query_params(view_kwargs, decorator_kwargs)
return fn(*view_args, **view_kwargs)
return decorated
if decorator_args and callable(decorator_args[0]):
return wrapped(decorator_args[0])
return wrapped
|
[
"def",
"param_converter",
"(",
"*",
"decorator_args",
",",
"*",
"*",
"decorator_kwargs",
")",
":",
"def",
"wrapped",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"decorated",
"(",
"*",
"view_args",
",",
"*",
"*",
"view_kwargs",
")",
":",
"view_kwargs",
"=",
"_convert_models",
"(",
"view_kwargs",
",",
"decorator_kwargs",
")",
"view_kwargs",
"=",
"_convert_query_params",
"(",
"view_kwargs",
",",
"decorator_kwargs",
")",
"return",
"fn",
"(",
"*",
"view_args",
",",
"*",
"*",
"view_kwargs",
")",
"return",
"decorated",
"if",
"decorator_args",
"and",
"callable",
"(",
"decorator_args",
"[",
"0",
"]",
")",
":",
"return",
"wrapped",
"(",
"decorator_args",
"[",
"0",
"]",
")",
"return",
"wrapped"
] |
Call with the url parameter names as keyword argument keys, their values
being the model to convert to.
Models will be looked up by the url param names. If a url param name
is prefixed with the snake-cased model name, the prefix will be stripped.
If a model isn't found, abort with a 404.
The action's argument names must match the snake-cased model names.
For example::
@bp.route('/users/<int:user_id>/posts/<int:id>')
@param_converter(user_id=User, id=Post)
def show_post(user, post):
# the param converter does the database lookups:
# user = User.query.filter_by(id=user_id).first()
# post = Post.query.filter_by(id=id).first()
# and calls the decorated action: show_post(user, post)
# or to customize the argument names passed to the action:
@bp.route('/users/<int:user_id>/posts/<int:post_id>')
@param_converter(user_id={'user_arg_name': User},
post_id={'post_arg_name': Post})
def show_post(user_arg_name, post_arg_name):
# do stuff ...
Also supports parsing arguments from the query string. For query string
keyword arguments, use a lookup (dict, Enum) or callable::
@bp.route('/users/<int:id>')
@param_converter(id=User, foo=str, optional=int)
def show_user(user, foo, optional=10):
# GET /users/1?foo=bar
# calls show_user(user=User.get(1), foo='bar')
|
[
"Call",
"with",
"the",
"url",
"parameter",
"names",
"as",
"keyword",
"argument",
"keys",
"their",
"values",
"being",
"the",
"model",
"to",
"convert",
"to",
"."
] |
8150896787907ef0001839b5a6ef303edccb9b6c
|
https://github.com/briancappello/flask-sqlalchemy-bundle/blob/8150896787907ef0001839b5a6ef303edccb9b6c/flask_sqlalchemy_bundle/decorators.py#L12-L60
|
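The last three lines of `param_converter` implement the dual-use decorator idiom: applied bare (`@param_converter`), the function itself arrives as the first positional argument; applied with arguments, the outer call returns `wrapped` to be applied next. The dispatch in isolation:

```python
from functools import wraps

def flexible(*args, **kwargs):
    def wrapped(fn):
        @wraps(fn)
        def decorated(*a, **kw):
            return fn(*a, **kw)
        return decorated
    if args and callable(args[0]):   # bare use: @flexible
        return wrapped(args[0])
    return wrapped                   # parameterised use: @flexible(...)

@flexible
def f():
    return "bare"

@flexible()
def g():
    return "with parentheses"

print(f(), g())   # bare with parentheses
```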
240,513
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/manage.py
|
load
|
def load(name, filepath, separator="---"):
"""Load given file into knowledge base.
Simply load data into an existing knowledge base:
.. code-block:: console
$ inveniomanage knowledge load mykb /path/to/file.kb
The file is expected to have a mapping with values: ``foo<separator>bar``
(per line).
``<separator>`` is by default set to **---**, but can be overridden with
``-s someseparator`` or ``--sep someseparator``.
"""
current_app.logger.info(
">>> Going to load knowledge base {0} into '{1}'...".format(
filepath, name
)
)
if not os.path.isfile(filepath):
current_app.logger.error(
"Path to non-existing file"  # logger.error() takes no file= kwarg; drop the print() leftovers
)
sys.exit(1)
try:
get_kb_by_name(name)
except NoResultFound:
current_app.logger.error(
"KB does not exist"
)
sys.exit(1)
num_added = load_kb_mappings_file(name, filepath, separator)
current_app.logger.info(
">>> Knowledge '{0}' updated successfully with {1} entries.".format(
name, num_added
)
)
|
python
|
def load(name, filepath, separator="---"):
"""Load given file into knowledge base.
Simply load data into an existing knowledge base:
.. code-block:: console
$ inveniomanage knowledge load mykb /path/to/file.kb
The file is expected to have a mapping with values: ``foo<separator>bar``
(per line).
``<separator>`` is by default set to **---**, but can be overridden with
``-s someseparator`` or ``--sep someseparator``.
"""
current_app.logger.info(
">>> Going to load knowledge base {0} into '{1}'...".format(
filepath, name
)
)
if not os.path.isfile(filepath):
current_app.logger.error(
"Path to non-existing file"  # logger.error() takes no file= kwarg; drop the print() leftovers
)
sys.exit(1)
try:
get_kb_by_name(name)
except NoResultFound:
current_app.logger.error(
"KB does not exist"
)
sys.exit(1)
num_added = load_kb_mappings_file(name, filepath, separator)
current_app.logger.info(
">>> Knowledge '{0}' updated successfully with {1} entries.".format(
name, num_added
)
)
|
[
"def",
"load",
"(",
"name",
",",
"filepath",
",",
"separator",
"=",
"\"---\"",
")",
":",
"current_app",
".",
"logger",
".",
"info",
"(",
"\">>> Going to load knowledge base {0} into '{1}'...\"",
".",
"format",
"(",
"filepath",
",",
"name",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"current_app",
".",
"logger",
".",
"error",
"(",
"\"Path to non-existing file\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"get_kb_by_name",
"(",
"name",
")",
"except",
"NoResultFound",
":",
"current_app",
".",
"logger",
".",
"error",
"(",
"\"KB does not exist\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"num_added",
"=",
"load_kb_mappings_file",
"(",
"name",
",",
"filepath",
",",
"separator",
")",
"current_app",
".",
"logger",
".",
"info",
"(",
"\">>> Knowledge '{0}' updated successfully with {1} entries.\"",
".",
"format",
"(",
"name",
",",
"num_added",
")",
")"
] |
Load given file into knowledge base.
Simply load data into an existing knowledge base:
.. code-block:: console
$ inveniomanage knowledge load mykb /path/to/file.kb
The file is expected to have a mapping with values: ``foo<separator>bar``
(per line).
``<separator>`` is by default set to **---**, but can be overridden with
``-s someseparator`` or ``--sep someseparator``.
|
[
"Load",
"given",
"file",
"into",
"knowledge",
"base",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/manage.py#L44-L83
|
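The ``foo<separator>bar`` file format the docstring describes is easy to sketch; a hedged stand-in for the per-line parsing that `load_kb_mappings_file` (not shown in this dump) presumably performs:

```python
separator = "---"
lines = [
    "CERN---European Organization for Nuclear Research",   # invented sample data
    "DESY---Deutsches Elektronen-Synchrotron",
]

mappings = {}
for line in lines:
    key, sep, value = line.partition(separator)   # split on the first separator only
    if sep:                                       # skip malformed lines
        mappings[key.strip()] = value.strip()
print(mappings)
```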
240,514
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/manage.py
|
main
|
def main():
"""Run manager."""
from invenio_base.factory import create_app
app = create_app()
manager.app = app
manager.run()
|
python
|
def main():
"""Run manager."""
from invenio_base.factory import create_app
app = create_app()
manager.app = app
manager.run()
|
[
"def",
"main",
"(",
")",
":",
"from",
"invenio_base",
".",
"factory",
"import",
"create_app",
"app",
"=",
"create_app",
"(",
")",
"manager",
".",
"app",
"=",
"app",
"manager",
".",
"run",
"(",
")"
] |
Run manager.
|
[
"Run",
"manager",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/manage.py#L86-L91
|
240,515
|
FlorianLudwig/rueckenwind
|
rw/cli.py
|
command
|
def command(func):
"""Decorator for CLI exposed functions"""
func.parser = SUB_PARSER.add_parser(func.__name__, help=func.__doc__)
func.parser.set_defaults(func=func)
return func
|
python
|
def command(func):
"""Decorator for CLI exposed functions"""
func.parser = SUB_PARSER.add_parser(func.__name__, help=func.__doc__)
func.parser.set_defaults(func=func)
return func
|
[
"def",
"command",
"(",
"func",
")",
":",
"func",
".",
"parser",
"=",
"SUB_PARSER",
".",
"add_parser",
"(",
"func",
".",
"__name__",
",",
"help",
"=",
"func",
".",
"__doc__",
")",
"func",
".",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"func",
")",
"return",
"func"
] |
Decorator for CLI exposed functions
|
[
"Decorator",
"for",
"CLI",
"exposed",
"functions"
] |
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
|
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/cli.py#L45-L49
|
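`SUB_PARSER` is module-level state not shown in this row; rows 240,516-517 suggest it comes from `argparse.ArgumentParser().add_subparsers()`. A self-contained sketch of the decorator wiring under that assumption:

```python
import argparse

ARG_PARSER = argparse.ArgumentParser(prog="rw")
SUB_PARSER = ARG_PARSER.add_subparsers()

def command(func):
    """Decorator for CLI exposed functions"""
    func.parser = SUB_PARSER.add_parser(func.__name__, help=func.__doc__)
    func.parser.set_defaults(func=func)
    return func

@command
def hello(args):
    """Say hello"""
    print("hello")

args = ARG_PARSER.parse_args(["hello"])
args.func(args)   # dispatches to hello()
```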
240,516
|
FlorianLudwig/rueckenwind
|
rw/cli.py
|
serv
|
def serv(args):
"""Serve a rueckenwind application"""
if not args.no_debug:
tornado.autoreload.start()
extra = []
if sys.stdout.isatty():
# set terminal title
sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
if args.cfg:
extra.append(os.path.abspath(args.cfg))
listen = (int(args.port), args.address)
ioloop = tornado.ioloop.IOLoop.instance()
setup_app(app=args.MODULE, extra_configs=extra,
ioloop=ioloop, listen=listen)
ioloop.start()
|
python
|
def serv(args):
"""Serve a rueckenwind application"""
if not args.no_debug:
tornado.autoreload.start()
extra = []
if sys.stdout.isatty():
# set terminal title
sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
if args.cfg:
extra.append(os.path.abspath(args.cfg))
listen = (int(args.port), args.address)
ioloop = tornado.ioloop.IOLoop.instance()
setup_app(app=args.MODULE, extra_configs=extra,
ioloop=ioloop, listen=listen)
ioloop.start()
|
[
"def",
"serv",
"(",
"args",
")",
":",
"if",
"not",
"args",
".",
"no_debug",
":",
"tornado",
".",
"autoreload",
".",
"start",
"(",
")",
"extra",
"=",
"[",
"]",
"if",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
":",
"# set terminal title",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\x1b]2;rw: {}\\x07'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
"[",
"2",
":",
"]",
")",
")",
")",
"if",
"args",
".",
"cfg",
":",
"extra",
".",
"append",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"cfg",
")",
")",
"listen",
"=",
"(",
"int",
"(",
"args",
".",
"port",
")",
",",
"args",
".",
"address",
")",
"ioloop",
"=",
"tornado",
".",
"ioloop",
".",
"IOLoop",
".",
"instance",
"(",
")",
"setup_app",
"(",
"app",
"=",
"args",
".",
"MODULE",
",",
"extra_configs",
"=",
"extra",
",",
"ioloop",
"=",
"ioloop",
",",
"listen",
"=",
"listen",
")",
"ioloop",
".",
"start",
"(",
")"
] |
Serve a rueckenwind application
|
[
"Serve",
"a",
"rueckenwind",
"application"
] |
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
|
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/cli.py#L53-L71
|
240,517
|
FlorianLudwig/rueckenwind
|
rw/cli.py
|
main
|
def main():
"""Entry point of rw cli"""
# configure logging
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.basicConfig(level=getattr(logging, log_level),
format='%(asctime)s %(name)s[%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_path = os.path.abspath('.')
if current_path not in sys.path:
sys.path.insert(0, current_path)
argcomplete.autocomplete(ARG_PARSER)
args = ARG_PARSER.parse_args()
args.func(args)
|
python
|
def main():
"""Entry point of rw cli"""
# configure logging
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.basicConfig(level=getattr(logging, log_level),
format='%(asctime)s %(name)s[%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_path = os.path.abspath('.')
if current_path not in sys.path:
sys.path.insert(0, current_path)
argcomplete.autocomplete(ARG_PARSER)
args = ARG_PARSER.parse_args()
args.func(args)
|
[
"def",
"main",
"(",
")",
":",
"# check logging",
"log_level",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'LOG_LEVEL'",
",",
"'INFO'",
")",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"getattr",
"(",
"logging",
",",
"log_level",
")",
",",
"format",
"=",
"'%(asctime)s %(name)s[%(levelname)s] %(message)s'",
",",
"datefmt",
"=",
"'%Y-%m-%d %H:%M:%S'",
")",
"current_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
")",
"if",
"current_path",
"not",
"in",
"sys",
".",
"path",
":",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"current_path",
")",
"argcomplete",
".",
"autocomplete",
"(",
"ARG_PARSER",
")",
"args",
"=",
"ARG_PARSER",
".",
"parse_args",
"(",
")",
"args",
".",
"func",
"(",
"args",
")"
] |
Entry point of rw cli
|
[
"Entry",
"point",
"of",
"rw",
"cli"
] |
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
|
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/cli.py#L113-L126
|
240,518
|
jkawamoto/dsargparse
|
dsargparse.py
|
_checker
|
def _checker(keywords):
"""Generate a checker which tests that a given value contains none of the keywords."""
def _(v):
"""Return True if the value contains none of the keywords."""
for k in keywords:
if k in v:
return False
return True
return _
|
python
|
def _checker(keywords):
"""Generate a checker which tests that a given value contains none of the keywords."""
def _(v):
"""Return True if the value contains none of the keywords."""
for k in keywords:
if k in v:
return False
return True
return _
|
[
"def",
"_checker",
"(",
"keywords",
")",
":",
"def",
"_",
"(",
"v",
")",
":",
"\"\"\"Check a given value matches to keywords.\"\"\"",
"for",
"k",
"in",
"keywords",
":",
"if",
"k",
"in",
"v",
":",
"return",
"False",
"return",
"True",
"return",
"_"
] |
Generate a checker which tests that a given value contains none of the keywords.
|
[
"Generate",
"a",
"checker",
"which",
"tests",
"a",
"given",
"value",
"not",
"starts",
"with",
"keywords",
"."
] |
dbbcea11ff1ae7b84bdfccb9f97d1947574e4126
|
https://github.com/jkawamoto/dsargparse/blob/dbbcea11ff1ae7b84bdfccb9f97d1947574e4126/dsargparse.py#L37-L45
|
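The predicate `_checker` returns is built for `itertools.takewhile`/`dropwhile`, as `_parse_doc` in the next row shows. A small demo with hypothetical section keywords (the real `_KEYWORDS` constants live elsewhere in dsargparse):

```python
import itertools

keywords = ["Args:", "Returns:"]   # hypothetical; stand-ins for _KEYWORDS

def checker(v):                    # equivalent to _checker(keywords)
    return not any(k in v for k in keywords)

lines = ["Parse a docstring.", "", "Args:", "  doc: docstring."]
head = list(itertools.takewhile(checker, lines))
print(head)   # ['Parse a docstring.', ''] -- stops at the first keyword line
```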
240,519
|
jkawamoto/dsargparse
|
dsargparse.py
|
_parse_doc
|
def _parse_doc(doc):
"""Parse a docstring.
Parse a docstring and extract three components: headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
"""
lines = doc.split("\n")
descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
if len(descriptions) < 3:
description = lines[0]
else:
description = "{0}\n\n{1}".format(
lines[0], textwrap.dedent("\n".join(descriptions[2:])))
args = list(itertools.takewhile(
_checker(_KEYWORDS_OTHERS),
itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))
argmap = {}
if len(args) > 1:
for pair in args[1:]:
kv = [v.strip() for v in pair.split(":")]
if len(kv) >= 2:
argmap[kv[0]] = ":".join(kv[1:])
return dict(headline=descriptions[0], description=description, args=argmap)
|
python
|
def _parse_doc(doc):
"""Parse a docstring.
Parse a docstring and extract three components: headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
"""
lines = doc.split("\n")
descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
if len(descriptions) < 3:
description = lines[0]
else:
description = "{0}\n\n{1}".format(
lines[0], textwrap.dedent("\n".join(descriptions[2:])))
args = list(itertools.takewhile(
_checker(_KEYWORDS_OTHERS),
itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))
argmap = {}
if len(args) > 1:
for pair in args[1:]:
kv = [v.strip() for v in pair.split(":")]
if len(kv) >= 2:
argmap[kv[0]] = ":".join(kv[1:])
return dict(headline=descriptions[0], description=description, args=argmap)
|
[
"def",
"_parse_doc",
"(",
"doc",
")",
":",
"lines",
"=",
"doc",
".",
"split",
"(",
"\"\\n\"",
")",
"descriptions",
"=",
"list",
"(",
"itertools",
".",
"takewhile",
"(",
"_checker",
"(",
"_KEYWORDS",
")",
",",
"lines",
")",
")",
"if",
"len",
"(",
"descriptions",
")",
"<",
"3",
":",
"description",
"=",
"lines",
"[",
"0",
"]",
"else",
":",
"description",
"=",
"\"{0}\\n\\n{1}\"",
".",
"format",
"(",
"lines",
"[",
"0",
"]",
",",
"textwrap",
".",
"dedent",
"(",
"\"\\n\"",
".",
"join",
"(",
"descriptions",
"[",
"2",
":",
"]",
")",
")",
")",
"args",
"=",
"list",
"(",
"itertools",
".",
"takewhile",
"(",
"_checker",
"(",
"_KEYWORDS_OTHERS",
")",
",",
"itertools",
".",
"dropwhile",
"(",
"_checker",
"(",
"_KEYWORDS_ARGS",
")",
",",
"lines",
")",
")",
")",
"argmap",
"=",
"{",
"}",
"if",
"len",
"(",
"args",
")",
">",
"1",
":",
"for",
"pair",
"in",
"args",
"[",
"1",
":",
"]",
":",
"kv",
"=",
"[",
"v",
".",
"strip",
"(",
")",
"for",
"v",
"in",
"pair",
".",
"split",
"(",
"\":\"",
")",
"]",
"if",
"len",
"(",
"kv",
")",
">=",
"2",
":",
"argmap",
"[",
"kv",
"[",
"0",
"]",
"]",
"=",
"\":\"",
".",
"join",
"(",
"kv",
"[",
"1",
":",
"]",
")",
"return",
"dict",
"(",
"headline",
"=",
"descriptions",
"[",
"0",
"]",
",",
"description",
"=",
"description",
",",
"args",
"=",
"argmap",
")"
] |
Parse a docstring.
Parse a docstring and extract three components: headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
|
[
"Parse",
"a",
"docstring",
"."
] |
dbbcea11ff1ae7b84bdfccb9f97d1947574e4126
|
https://github.com/jkawamoto/dsargparse/blob/dbbcea11ff1ae7b84bdfccb9f97d1947574e4126/dsargparse.py#L48-L79
|
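A minimal usage sketch for _parse_doc, assuming dsargparse.py is importable; the sample docstring is hypothetical, and it assumes "Args:" is among the section keywords the module recognizes:

from dsargparse import _parse_doc

DOC = """Say hello.

Greets the given person by name.

Args:
    name: who to greet.
"""

info = _parse_doc(DOC)
print(info["headline"])      # Say hello.
print(info["args"]["name"])  # who to greet.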
240,520
|
jkawamoto/dsargparse
|
dsargparse.py
|
_SubparsersWrapper.add_parser
|
def add_parser(self, func=None, name=None, **kwargs):
"""Add parser.
This method makes a new sub command parser. It takes the same arguments
as add_parser() of the action class made by
argparse.ArgumentParser.add_subparsers.
In addition, it takes one positional argument `func`, which is the
function that implements the process of this sub command. The `func` will be
used to determine the name, help, and description of this sub command. The
function `func` will also be set as the default value of the `cmd` attribute.
If you want to choose the name of this sub command, use the keyword argument
`name`.
Args:
func: function that implements the process of this command.
name: name of this command. If not given, the function name is used.
Returns:
new ArgumentParser object.
Raises:
ValueError: if the given function does not have docstrings.
"""
if func:
if not func.__doc__:
raise ValueError(
"No docstrings given in {0}".format(func.__name__))
info = _parse_doc(func.__doc__)
if _HELP not in kwargs or not kwargs[_HELP]:
kwargs[_HELP] = info["headline"]
if _DESCRIPTION not in kwargs or not kwargs[_DESCRIPTION]:
kwargs[_DESCRIPTION] = info["description"]
if _FORMAT_CLASS not in kwargs or not kwargs[_FORMAT_CLASS]:
kwargs[_FORMAT_CLASS] = argparse.RawTextHelpFormatter
if not name:
name = func.__name__ if hasattr(func, "__name__") else func
res = self.__delegate.add_parser(name, argmap=info["args"], **kwargs)
res.set_defaults(cmd=func)
else:
res = self.__delegate.add_parser(name, **kwargs)
return res
|
python
|
def add_parser(self, func=None, name=None, **kwargs):
"""Add parser.
This method makes a new sub command parser. It takes the same arguments
as add_parser() of the action class made by
argparse.ArgumentParser.add_subparsers.
In addition, it takes one positional argument `func`, which is the
function that implements the process of this sub command. The `func` will be
used to determine the name, help, and description of this sub command. The
function `func` will also be set as the default value of the `cmd` attribute.
If you want to choose the name of this sub command, use the keyword argument
`name`.
Args:
func: function that implements the process of this command.
name: name of this command. If not given, the function name is used.
Returns:
new ArgumentParser object.
Raises:
ValueError: if the given function does not have docstrings.
"""
if func:
if not func.__doc__:
raise ValueError(
"No docstrings given in {0}".format(func.__name__))
info = _parse_doc(func.__doc__)
if _HELP not in kwargs or not kwargs[_HELP]:
kwargs[_HELP] = info["headline"]
if _DESCRIPTION not in kwargs or not kwargs[_DESCRIPTION]:
kwargs[_DESCRIPTION] = info["description"]
if _FORMAT_CLASS not in kwargs or not kwargs[_FORMAT_CLASS]:
kwargs[_FORMAT_CLASS] = argparse.RawTextHelpFormatter
if not name:
name = func.__name__ if hasattr(func, "__name__") else func
res = self.__delegate.add_parser(name, argmap=info["args"], **kwargs)
res.set_defaults(cmd=func)
else:
res = self.__delegate.add_parser(name, **kwargs)
return res
|
[
"def",
"add_parser",
"(",
"self",
",",
"func",
"=",
"None",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"func",
":",
"if",
"not",
"func",
".",
"__doc__",
":",
"raise",
"ValueError",
"(",
"\"No docstrings given in {0}\"",
".",
"format",
"(",
"func",
".",
"__name__",
")",
")",
"info",
"=",
"_parse_doc",
"(",
"func",
".",
"__doc__",
")",
"if",
"_HELP",
"not",
"in",
"kwargs",
"or",
"not",
"kwargs",
"[",
"_HELP",
"]",
":",
"kwargs",
"[",
"_HELP",
"]",
"=",
"info",
"[",
"\"headline\"",
"]",
"if",
"_DESCRIPTION",
"not",
"in",
"kwargs",
"or",
"not",
"kwargs",
"[",
"_DESCRIPTION",
"]",
":",
"kwargs",
"[",
"_DESCRIPTION",
"]",
"=",
"info",
"[",
"\"description\"",
"]",
"if",
"_FORMAT_CLASS",
"not",
"in",
"kwargs",
"or",
"not",
"kwargs",
"[",
"_FORMAT_CLASS",
"]",
":",
"kwargs",
"[",
"_FORMAT_CLASS",
"]",
"=",
"argparse",
".",
"RawTextHelpFormatter",
"if",
"not",
"name",
":",
"name",
"=",
"func",
".",
"__name__",
"if",
"hasattr",
"(",
"func",
",",
"\"__name__\"",
")",
"else",
"func",
"res",
"=",
"self",
".",
"__delegate",
".",
"add_parser",
"(",
"name",
",",
"argmap",
"=",
"info",
"[",
"\"args\"",
"]",
",",
"*",
"*",
"kwargs",
")",
"res",
".",
"set_defaults",
"(",
"cmd",
"=",
"func",
")",
"else",
":",
"res",
"=",
"self",
".",
"__delegate",
".",
"add_parser",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
"return",
"res"
] |
Add parser.
This method makes a new sub command parser. It takes the same arguments
as add_parser() of the action class made by
argparse.ArgumentParser.add_subparsers.
In addition, it takes one positional argument `func`, which is the
function that implements the process of this sub command. The `func` will be
used to determine the name, help, and description of this sub command. The
function `func` will also be set as the default value of the `cmd` attribute.
If you want to choose the name of this sub command, use the keyword argument
`name`.
Args:
func: function that implements the process of this command.
name: name of this command. If not given, the function name is used.
Returns:
new ArgumentParser object.
Raises:
ValueError: if the given function does not have docstrings.
|
[
"Add",
"parser",
"."
] |
dbbcea11ff1ae7b84bdfccb9f97d1947574e4126
|
https://github.com/jkawamoto/dsargparse/blob/dbbcea11ff1ae7b84bdfccb9f97d1947574e4126/dsargparse.py#L93-L140
|
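A hedged end-to-end sketch of the intended workflow, assuming dsargparse.ArgumentParser accepts the usual argparse constructor arguments and that add_subparsers() returns the wrapper shown above:

import dsargparse

def init(name):
    """Initialize a new project.

    Args:
        name: name of the project to create.
    """
    print("initializing", name)

parser = dsargparse.ArgumentParser()
subparsers = parser.add_subparsers()
init_parser = subparsers.add_parser(init)  # help/description come from init's docstring
init_parser.add_argument("name")           # help text looked up from the Args: section
args = parser.parse_args(["init", "demo"])
args.cmd(args.name)                        # cmd was bound to init by add_parser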
240,521
|
jkawamoto/dsargparse
|
dsargparse.py
|
ArgumentParser.add_argument
|
def add_argument(self, *args, **kwargs):
"""Add an argument.
This method adds a new argument to the current parser. The function is the
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine the help message for the argument being added from
docstrings.
If the new argument belongs to a sub command, and the docstring of the
function that implements the sub command has an ``Args:`` section that
defines a variable of the same name, this method uses that
definition as the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument.
"""
if _HELP not in kwargs:
for name in args:
name = name.replace("-", "")
if name in self.__argmap:
kwargs[_HELP] = self.__argmap[name]
break
return super(ArgumentParser, self).add_argument(*args, **kwargs)
|
python
|
def add_argument(self, *args, **kwargs):
"""Add an argument.
This method adds a new argument to the current parser. The function is the
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine the help message for the argument being added from
docstrings.
If the new argument belongs to a sub command, and the docstring of the
function that implements the sub command has an ``Args:`` section that
defines a variable of the same name, this method uses that
definition as the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument.
"""
if _HELP not in kwargs:
for name in args:
name = name.replace("-", "")
if name in self.__argmap:
kwargs[_HELP] = self.__argmap[name]
break
return super(ArgumentParser, self).add_argument(*args, **kwargs)
|
[
"def",
"add_argument",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"_HELP",
"not",
"in",
"kwargs",
":",
"for",
"name",
"in",
"args",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"\"-\"",
",",
"\"\"",
")",
"if",
"name",
"in",
"self",
".",
"__argmap",
":",
"kwargs",
"[",
"_HELP",
"]",
"=",
"self",
".",
"__argmap",
"[",
"name",
"]",
"break",
"return",
"super",
"(",
"ArgumentParser",
",",
"self",
")",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Add an argument.
This method adds a new argument to the current parser. The function is the
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine the help message for the argument being added from
docstrings.
If the new argument belongs to a sub command, and the docstring of the
function that implements the sub command has an ``Args:`` section that
defines a variable of the same name, this method uses that
definition as the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keyword arguments as argparse.ArgumentParser.add_argument.
|
[
"Add",
"an",
"argument",
"."
] |
dbbcea11ff1ae7b84bdfccb9f97d1947574e4126
|
https://github.com/jkawamoto/dsargparse/blob/dbbcea11ff1ae7b84bdfccb9f97d1947574e4126/dsargparse.py#L183-L208
|
240,522
|
JNRowe/jnrbase
|
jnrbase/xdg_basedir.py
|
__user_location
|
def __user_location(__pkg: str, type_) -> str:
"""Utility function to look up XDG basedir locations
Args:
__pkg: Package name
type_: Location type
"""
if ALLOW_DARWIN and sys.platform == 'darwin':
user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])
else:
user_dir = getenv('XDG_{}_HOME'.format(type_.upper()),
path.sep.join([getenv('HOME', ''),
__LOCATIONS[type_][1]]))
return path.expanduser(path.sep.join([user_dir, __pkg]))
|
python
|
def __user_location(__pkg: str, type_) -> str:
"""Utility function to look up XDG basedir locations
Args:
__pkg: Package name
type_: Location type
"""
if ALLOW_DARWIN and sys.platform == 'darwin':
user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])
else:
user_dir = getenv('XDG_{}_HOME'.format(type_.upper()),
path.sep.join([getenv('HOME', ''),
__LOCATIONS[type_][1]]))
return path.expanduser(path.sep.join([user_dir, __pkg]))
|
[
"def",
"__user_location",
"(",
"__pkg",
":",
"str",
",",
"type_",
")",
"->",
"str",
":",
"if",
"ALLOW_DARWIN",
"and",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"user_dir",
"=",
"'~/Library/{}'",
".",
"format",
"(",
"__LOCATIONS",
"[",
"type_",
"]",
"[",
"0",
"]",
")",
"else",
":",
"user_dir",
"=",
"getenv",
"(",
"'XDG_{}_HOME'",
".",
"format",
"(",
"type_",
".",
"upper",
"(",
")",
")",
",",
"path",
".",
"sep",
".",
"join",
"(",
"[",
"getenv",
"(",
"'HOME'",
",",
"''",
")",
",",
"__LOCATIONS",
"[",
"type_",
"]",
"[",
"1",
"]",
"]",
")",
")",
"return",
"path",
".",
"expanduser",
"(",
"path",
".",
"sep",
".",
"join",
"(",
"[",
"user_dir",
",",
"__pkg",
"]",
")",
")"
] |
Utility function to look up XDG basedir locations
Args:
__pkg: Package name
type_: Location type
|
[
"Utility",
"function",
"to",
"look",
"up",
"XDG",
"basedir",
"locations"
] |
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
|
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/xdg_basedir.py#L38-L51
|
240,523
|
JNRowe/jnrbase
|
jnrbase/xdg_basedir.py
|
get_configs
|
def get_configs(__pkg: str, __name: str = 'config') -> List[str]:
"""Return all configs for given package.
Args:
__pkg: Package name
__name: Configuration file name
"""
dirs = [user_config(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))
configs = []
for dname in reversed(dirs):
test_path = path.join(dname, __name)
if path.exists(test_path):
configs.append(test_path)
return configs
|
python
|
def get_configs(__pkg: str, __name: str = 'config') -> List[str]:
"""Return all configs for given package.
Args:
__pkg: Package name
__name: Configuration file name
"""
dirs = [user_config(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))
configs = []
for dname in reversed(dirs):
test_path = path.join(dname, __name)
if path.exists(test_path):
configs.append(test_path)
return configs
|
[
"def",
"get_configs",
"(",
"__pkg",
":",
"str",
",",
"__name",
":",
"str",
"=",
"'config'",
")",
"->",
"List",
"[",
"str",
"]",
":",
"dirs",
"=",
"[",
"user_config",
"(",
"__pkg",
")",
",",
"]",
"dirs",
".",
"extend",
"(",
"path",
".",
"expanduser",
"(",
"path",
".",
"sep",
".",
"join",
"(",
"[",
"d",
",",
"__pkg",
"]",
")",
")",
"for",
"d",
"in",
"getenv",
"(",
"'XDG_CONFIG_DIRS'",
",",
"'/etc/xdg'",
")",
".",
"split",
"(",
"':'",
")",
")",
"configs",
"=",
"[",
"]",
"for",
"dname",
"in",
"reversed",
"(",
"dirs",
")",
":",
"test_path",
"=",
"path",
".",
"join",
"(",
"dname",
",",
"__name",
")",
"if",
"path",
".",
"exists",
"(",
"test_path",
")",
":",
"configs",
".",
"append",
"(",
"test_path",
")",
"return",
"configs"
] |
Return all configs for given package.
Args:
__pkg: Package name
__name: Configuration file name
|
[
"Return",
"all",
"configs",
"for",
"given",
"package",
"."
] |
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
|
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/xdg_basedir.py#L75-L90
|
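A minimal sketch, assuming jnrbase is installed; the package name 'mypkg' is hypothetical:

from jnrbase import xdg_basedir

# Existing config files for 'mypkg': system paths first, the user path last,
# so later entries override earlier ones.
for config in xdg_basedir.get_configs('mypkg'):
    print(config)

# A differently named file can be requested explicitly.
ini_files = xdg_basedir.get_configs('mypkg', 'settings.ini')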
240,524
|
JNRowe/jnrbase
|
jnrbase/xdg_basedir.py
|
get_data
|
def get_data(__pkg: str, __name: str) -> str:
"""Return top-most data file for given package.
Args:
__pkg: Package name
__name: Data file name
"""
for dname in get_data_dirs(__pkg):
test_path = path.join(dname, __name)
if path.exists(test_path):
return test_path
raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
|
python
|
def get_data(__pkg: str, __name: str) -> str:
"""Return top-most data file for given package.
Args:
__pkg: Package name
__name: Data file name
"""
for dname in get_data_dirs(__pkg):
test_path = path.join(dname, __name)
if path.exists(test_path):
return test_path
raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
|
[
"def",
"get_data",
"(",
"__pkg",
":",
"str",
",",
"__name",
":",
"str",
")",
"->",
"str",
":",
"for",
"dname",
"in",
"get_data_dirs",
"(",
"__pkg",
")",
":",
"test_path",
"=",
"path",
".",
"join",
"(",
"dname",
",",
"__name",
")",
"if",
"path",
".",
"exists",
"(",
"test_path",
")",
":",
"return",
"test_path",
"raise",
"FileNotFoundError",
"(",
"'No data file {!r} for {!r}'",
".",
"format",
"(",
"__name",
",",
"__pkg",
")",
")"
] |
Return top-most data file for given package.
Args:
__pkg: Package name
__name: Data file name
|
[
"Return",
"top",
"-",
"most",
"data",
"file",
"for",
"given",
"package",
"."
] |
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
|
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/xdg_basedir.py#L93-L104
|
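A short sketch, assuming jnrbase is installed; 'mypkg' and 'icon.png' are hypothetical:

from jnrbase import xdg_basedir

# Returns the highest-priority copy of the file, or raises FileNotFoundError.
try:
    icon = xdg_basedir.get_data('mypkg', 'icon.png')
except FileNotFoundError:
    icon = None  # fall back to a bundled default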
240,525
|
JNRowe/jnrbase
|
jnrbase/xdg_basedir.py
|
get_data_dirs
|
def get_data_dirs(__pkg: str) -> List[str]:
"""Return all data directories for given package.
Args:
__pkg: Package name
"""
dirs = [user_data(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_DATA_DIRS',
'/usr/local/share/:/usr/share/').split(':'))
return [d for d in dirs if path.isdir(d)]
|
python
|
def get_data_dirs(__pkg: str) -> List[str]:
"""Return all data directories for given package.
Args:
__pkg: Package name
"""
dirs = [user_data(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_DATA_DIRS',
'/usr/local/share/:/usr/share/').split(':'))
return [d for d in dirs if path.isdir(d)]
|
[
"def",
"get_data_dirs",
"(",
"__pkg",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"dirs",
"=",
"[",
"user_data",
"(",
"__pkg",
")",
",",
"]",
"dirs",
".",
"extend",
"(",
"path",
".",
"expanduser",
"(",
"path",
".",
"sep",
".",
"join",
"(",
"[",
"d",
",",
"__pkg",
"]",
")",
")",
"for",
"d",
"in",
"getenv",
"(",
"'XDG_DATA_DIRS'",
",",
"'/usr/local/share/:/usr/share/'",
")",
".",
"split",
"(",
"':'",
")",
")",
"return",
"[",
"d",
"for",
"d",
"in",
"dirs",
"if",
"path",
".",
"isdir",
"(",
"d",
")",
"]"
] |
Return all data directories for given package.
Args:
__pkg: Package name
|
[
"Return",
"all",
"data",
"directories",
"for",
"given",
"package",
"."
] |
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
|
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/xdg_basedir.py#L107-L117
|
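A companion sketch to the two above; only directories that actually exist are returned, with the user directory first:

from jnrbase import xdg_basedir

for dname in xdg_basedir.get_data_dirs('mypkg'):  # 'mypkg' is hypothetical
    print(dname)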
240,526
|
armenzg/pulse_replay
|
replay/replay.py
|
replay_messages
|
def replay_messages(filepath, process_message, *args, **kwargs):
''' Take pulse messages from a file and process each with process_message.
:param filepath: File containing dumped pulse messages
:type filepath: str
:param process_message: Function to process each pulse message with
:type process_message: function
:param *args: Arguments to be passed to process_message()
:type *args: tuple
:param **kwargs: Keyword arguments to be passed to process_message()
:type **kwargs: dict
:returns: Nothing
:rtype: None
'''
message = Mock()
file_contents = _read_file(filepath)
for line in file_contents.splitlines():
# Using ast.literal_eval to turn pulse message strings into dicts
process_message(ast.literal_eval(line), message, *args, **kwargs)
|
python
|
def replay_messages(filepath, process_message, *args, **kwargs):
''' Take pulse messages from a file and process each with process_message.
:param filepath: File containing dumped pulse messages
:type filepath: str
:param process_message: Function to process each pulse message with
:type process_message: function
:param *args: Arguments to be passed to process_message()
:type *args: tuple
:param **kwargs: Keyword arguments to be passed to process_message()
:type **kwargs: dict
:returns: Nothing
:rtype: None
'''
message = Mock()
file_contents = _read_file(filepath)
for line in file_contents.splitlines():
# Using ast.literal_eval to turn pulse message strings into dicts
process_message(ast.literal_eval(line), message, *args, **kwargs)
|
[
"def",
"replay_messages",
"(",
"filepath",
",",
"process_message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"message",
"=",
"Mock",
"(",
")",
"file_contents",
"=",
"_read_file",
"(",
"filepath",
")",
"for",
"line",
"in",
"file_contents",
".",
"splitlines",
"(",
")",
":",
"# Using ast.literal_eval to turn pulse message strings into dicts",
"process_message",
"(",
"ast",
".",
"literal_eval",
"(",
"line",
")",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Take pulse messages from a file and process each with process_message.
:param filepath: File containing dumped pulse messages
:type filepath: str
:param process_message: Function to process each pulse message with
:type process_message: function
:param *args: Arguments to be passed to process_message()
:type *args: tuple
:param **kwargs: Keyword arguments to be passed to process_message()
:type **kwargs: dict
:returns: Nothing
:rtype: None
|
[
"Take",
"pulse",
"messages",
"from",
"a",
"file",
"and",
"process",
"each",
"with",
"process_message",
"."
] |
d3fae9f445aaaeb58d17898a597524375e9bf3ce
|
https://github.com/armenzg/pulse_replay/blob/d3fae9f445aaaeb58d17898a597524375e9bf3ce/replay/replay.py#L60-L81
|
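A hedged sketch; the dump file and handler are hypothetical, and each line of the dump must be the repr of a dict so that ast.literal_eval can parse it:

from replay.replay import replay_messages

def handle(data, message, verbose=False):
    # data is the parsed pulse message dict; message is the Mock stand-in
    if verbose:
        print(data.get('payload'))

replay_messages('pulse_dump.txt', handle, verbose=True)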
240,527
|
robertchase/ergaleia
|
ergaleia/normalize_path.py
|
normalize_path
|
def normalize_path(path, filetype=None, has_filetype=True):
""" Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
return path
if '.' in path and os.path.sep not in path: # path is dot separated
parts = path.split('.')
extension = ''
if len(parts) > 1:
if filetype and has_filetype:
has_filetype = False # filetype is more specific
if (filetype and parts[-1] == filetype) or has_filetype:
extension = '.' + parts[-1]
parts = parts[:-1]
if len(parts) > 1:
if PY3:
spec = importlib.util.find_spec(parts[0])
path = list(spec.submodule_search_locations)[0]
else:
_, path, _ = imp.find_module(parts[0])
path = os.path.join(path, *parts[1:]) + extension
return path
|
python
|
def normalize_path(path, filetype=None, has_filetype=True):
""" Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
return path
if '.' in path and os.path.sep not in path: # path is dot separated
parts = path.split('.')
extension = ''
if len(parts) > 1:
if filetype and has_filetype:
has_filetype = False # filetype is more specific
if (filetype and parts[-1] == filetype) or has_filetype:
extension = '.' + parts[-1]
parts = parts[:-1]
if len(parts) > 1:
if PY3:
spec = importlib.util.find_spec(parts[0])
path = list(spec.submodule_search_locations)[0]
else:
_, path, _ = imp.find_module(parts[0])
path = os.path.join(path, *parts[1:]) + extension
return path
|
[
"def",
"normalize_path",
"(",
"path",
",",
"filetype",
"=",
"None",
",",
"has_filetype",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"return",
"path",
"if",
"'.'",
"in",
"path",
"and",
"os",
".",
"path",
".",
"sep",
"not",
"in",
"path",
":",
"# path is dot separated",
"parts",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"extension",
"=",
"''",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"if",
"filetype",
"and",
"has_filetype",
":",
"has_filetype",
"=",
"False",
"# filetype is more specific",
"if",
"(",
"filetype",
"and",
"parts",
"[",
"-",
"1",
"]",
"==",
"filetype",
")",
"or",
"has_filetype",
":",
"extension",
"=",
"'.'",
"+",
"parts",
"[",
"-",
"1",
"]",
"parts",
"=",
"parts",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"if",
"PY3",
":",
"spec",
"=",
"importlib",
".",
"util",
".",
"find_spec",
"(",
"parts",
"[",
"0",
"]",
")",
"path",
"=",
"list",
"(",
"spec",
".",
"submodule_search_locations",
")",
"[",
"0",
"]",
"else",
":",
"_",
",",
"path",
",",
"_",
"=",
"imp",
".",
"find_module",
"(",
"parts",
"[",
"0",
"]",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"*",
"parts",
"[",
"1",
":",
"]",
")",
"+",
"extension",
"return",
"path"
] |
Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
|
[
"Convert",
"dot",
"-",
"separated",
"paths",
"to",
"directory",
"paths"
] |
df8e9a4b18c563022a503faa27e822c9a5755490
|
https://github.com/robertchase/ergaleia/blob/df8e9a4b18c563022a503faa27e822c9a5755490/ergaleia/normalize_path.py#L17-L65
|
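A usage sketch, assuming ergaleia is importable; 'myprog' is a hypothetical package on PYTHONPATH:

from ergaleia.normalize_path import normalize_path

normalize_path('myprog.util.test.txt')                  # -> .../myprog/util/test.txt
normalize_path('/already/a/path')                       # returned unchanged (has a separator)
normalize_path('myprog.util.data', has_filetype=False)  # last token kept, no extension split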
240,528
|
HazardDede/dictmentor
|
dictmentor/utils.py
|
eval_first_non_none
|
def eval_first_non_none(eval_list: Iterable[Callable[..., Any]], **kwargs: Any) -> Any:
"""
Executes a list of functions and returns the first non-None result. All kwargs will be passed as
kwargs to each individual function. If all functions return None, None is the overall result.
Examples:
>>> eval_first_non_none((lambda: None, lambda: None, lambda: 3))
3
>>> print(eval_first_non_none([lambda: None, lambda: None, lambda: None]))
None
>>> eval_first_non_none([
... lambda cnt: cnt if cnt == 1 else None,
... lambda cnt: cnt if cnt == 2 else None,
... lambda cnt: cnt if cnt == 3 else None]
... , cnt=2)
2
"""
Validator.is_real_iterable(raise_ex=True, eval_list=eval_list)
for eval_fun in eval_list:
res = eval_fun(**kwargs)
if res is not None:
return res
return None
|
python
|
def eval_first_non_none(eval_list: Iterable[Callable[..., Any]], **kwargs: Any) -> Any:
"""
Executes a list of functions and returns the first non-None result. All kwargs will be passed as
kwargs to each individual function. If all functions return None, None is the overall result.
Examples:
>>> eval_first_non_none((lambda: None, lambda: None, lambda: 3))
3
>>> print(eval_first_non_none([lambda: None, lambda: None, lambda: None]))
None
>>> eval_first_non_none([
... lambda cnt: cnt if cnt == 1 else None,
... lambda cnt: cnt if cnt == 2 else None,
... lambda cnt: cnt if cnt == 3 else None]
... , cnt=2)
2
"""
Validator.is_real_iterable(raise_ex=True, eval_list=eval_list)
for eval_fun in eval_list:
res = eval_fun(**kwargs)
if res is not None:
return res
return None
|
[
"def",
"eval_first_non_none",
"(",
"eval_list",
":",
"Iterable",
"[",
"Callable",
"[",
"...",
",",
"Any",
"]",
"]",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"Any",
":",
"Validator",
".",
"is_real_iterable",
"(",
"raise_ex",
"=",
"True",
",",
"eval_list",
"=",
"eval_list",
")",
"for",
"eval_fun",
"in",
"eval_list",
":",
"res",
"=",
"eval_fun",
"(",
"*",
"*",
"kwargs",
")",
"if",
"res",
"is",
"not",
"None",
":",
"return",
"res",
"return",
"None"
] |
Executes a list of functions and returns the first non-None result. All kwargs will be passed as
kwargs to each individual function. If all functions return None, None is the overall result.
Examples:
>>> eval_first_non_none((lambda: None, lambda: None, lambda: 3))
3
>>> print(eval_first_non_none([lambda: None, lambda: None, lambda: None]))
None
>>> eval_first_non_none([
... lambda cnt: cnt if cnt == 1 else None,
... lambda cnt: cnt if cnt == 2 else None,
... lambda cnt: cnt if cnt == 3 else None]
... , cnt=2)
2
|
[
"Executes",
"a",
"list",
"of",
"functions",
"and",
"returns",
"the",
"first",
"non",
"none",
"result",
".",
"All",
"kwargs",
"will",
"be",
"passed",
"as",
"kwargs",
"to",
"each",
"individual",
"function",
".",
"If",
"all",
"functions",
"return",
"None",
"None",
"is",
"the",
"overall",
"result",
"."
] |
f50ca26ed04f7a924cde6e4d464c4f6ccba4e320
|
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/utils.py#L112-L136
|
240,529
|
the01/python-paps
|
paps/crowd/controller.py
|
CrowdController.on_person_new
|
def on_person_new(self, people):
"""
New people joined the audience
:param people: People that just joined the audience
:type people: list[paps.person.Person]
:rtype: None
"""
self.debug("()")
changed = []
with self._people_lock:
for p in people:
person = Person.from_person(p)
if person.id in self._people:
self.warning(
u"{} already in audience".format(person.id)
)
self._people[person.id] = person
changed.append(person)
for plugin in self.plugins:
try:
plugin.on_person_new(changed)
except Exception:
self.exception(
u"Failed to send new people to {}".format(plugin.name)
)
|
python
|
def on_person_new(self, people):
"""
New people joined the audience
:param people: People that just joined the audience
:type people: list[paps.person.Person]
:rtype: None
"""
self.debug("()")
changed = []
with self._people_lock:
for p in people:
person = Person.from_person(p)
if person.id in self._people:
self.warning(
u"{} already in audience".format(person.id)
)
self._people[person.id] = person
changed.append(person)
for plugin in self.plugins:
try:
plugin.on_person_new(changed)
except Exception:
self.exception(
u"Failed to send new people to {}".format(plugin.name)
)
|
[
"def",
"on_person_new",
"(",
"self",
",",
"people",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"changed",
"=",
"[",
"]",
"with",
"self",
".",
"_people_lock",
":",
"for",
"p",
"in",
"people",
":",
"person",
"=",
"Person",
".",
"from_person",
"(",
"p",
")",
"if",
"person",
".",
"id",
"in",
"self",
".",
"_people",
":",
"self",
".",
"warning",
"(",
"u\"{} already in audience\"",
".",
"format",
"(",
"person",
".",
"id",
")",
")",
"self",
".",
"_people",
"[",
"person",
".",
"id",
"]",
"=",
"person",
"changed",
".",
"append",
"(",
"person",
")",
"for",
"plugin",
"in",
"self",
".",
"plugins",
":",
"try",
":",
"plugin",
".",
"on_person_new",
"(",
"changed",
")",
"except",
":",
"self",
".",
"exception",
"(",
"u\"Failed to send new people to {}\"",
".",
"format",
"(",
"plugin",
".",
"name",
")",
")"
] |
New people joined the audience
:param people: People that just joined the audience
:type people: list[paps.person.Person]
:rtype: None
|
[
"New",
"people",
"joined",
"the",
"audience"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/crowd/controller.py#L49-L74
|
240,530
|
hobson/pug-dj
|
pug/dj/miner/management/commands/modeldb.py
|
Command.normalize_col_name
|
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
|
python
|
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
|
[
"def",
"normalize_col_name",
"(",
"self",
",",
"col_name",
",",
"used_column_names",
",",
"is_relation",
")",
":",
"field_params",
"=",
"{",
"}",
"field_notes",
"=",
"[",
"]",
"new_name",
"=",
"col_name",
".",
"lower",
"(",
")",
"if",
"new_name",
"!=",
"col_name",
":",
"field_notes",
".",
"append",
"(",
"'Field name made lowercase.'",
")",
"if",
"is_relation",
":",
"if",
"new_name",
".",
"endswith",
"(",
"'_id'",
")",
":",
"new_name",
"=",
"new_name",
"[",
":",
"-",
"3",
"]",
"else",
":",
"field_params",
"[",
"'db_column'",
"]",
"=",
"col_name",
"new_name",
",",
"num_repl",
"=",
"re",
".",
"subn",
"(",
"r'\\W'",
",",
"'_'",
",",
"new_name",
")",
"if",
"num_repl",
">",
"0",
":",
"field_notes",
".",
"append",
"(",
"'Field renamed to remove unsuitable characters.'",
")",
"if",
"new_name",
".",
"find",
"(",
"'__'",
")",
">=",
"0",
":",
"while",
"new_name",
".",
"find",
"(",
"'__'",
")",
">=",
"0",
":",
"new_name",
"=",
"new_name",
".",
"replace",
"(",
"'__'",
",",
"'_'",
")",
"if",
"col_name",
".",
"lower",
"(",
")",
".",
"find",
"(",
"'__'",
")",
">=",
"0",
":",
"# Only add the comment if the double underscore was in the original name",
"field_notes",
".",
"append",
"(",
"\"Field renamed because it contained more than one '_' in a row.\"",
")",
"if",
"new_name",
".",
"startswith",
"(",
"'_'",
")",
":",
"new_name",
"=",
"'field%s'",
"%",
"new_name",
"field_notes",
".",
"append",
"(",
"\"Field renamed because it started with '_'.\"",
")",
"if",
"new_name",
".",
"endswith",
"(",
"'_'",
")",
":",
"new_name",
"=",
"'%sfield'",
"%",
"new_name",
"field_notes",
".",
"append",
"(",
"\"Field renamed because it ended with '_'.\"",
")",
"if",
"keyword",
".",
"iskeyword",
"(",
"new_name",
")",
":",
"new_name",
"+=",
"'_field'",
"field_notes",
".",
"append",
"(",
"'Field renamed because it was a Python reserved word.'",
")",
"if",
"new_name",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"new_name",
"=",
"'number_%s'",
"%",
"new_name",
"field_notes",
".",
"append",
"(",
"\"Field renamed because it wasn't a valid Python identifier.\"",
")",
"if",
"new_name",
"in",
"used_column_names",
":",
"num",
"=",
"0",
"while",
"'%s_%d'",
"%",
"(",
"new_name",
",",
"num",
")",
"in",
"used_column_names",
":",
"num",
"+=",
"1",
"new_name",
"=",
"'%s_%d'",
"%",
"(",
"new_name",
",",
"num",
")",
"field_notes",
".",
"append",
"(",
"'Field renamed because of name conflict.'",
")",
"if",
"col_name",
"!=",
"new_name",
"and",
"field_notes",
":",
"field_params",
"[",
"'db_column'",
"]",
"=",
"col_name",
"return",
"new_name",
",",
"field_params",
",",
"field_notes"
] |
Modify the column name to make it Python-compatible as a field name
|
[
"Modify",
"the",
"column",
"name",
"to",
"make",
"it",
"Python",
"-",
"compatible",
"as",
"a",
"field",
"name"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/management/commands/modeldb.py#L129-L183
|
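A standalone sketch of the renaming rules; the method never touches self, so None can stand in for it (assumes the module imports cleanly, which needs Django on the path):

from pug.dj.miner.management.commands.modeldb import Command

name, params, notes = Command.normalize_col_name(None, 'User ID', set(), False)
# name == 'user_id'; params == {'db_column': 'User ID'}

name, params, notes = Command.normalize_col_name(None, 'class', set(), False)
# name == 'class_field' -- Python reserved words get a suffix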
240,531
|
hobson/pug-dj
|
pug/dj/miner/management/commands/modeldb.py
|
Command.get_field_type
|
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
|
python
|
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
|
[
"def",
"get_field_type",
"(",
"self",
",",
"connection",
",",
"table_name",
",",
"row",
")",
":",
"field_params",
"=",
"{",
"}",
"field_notes",
"=",
"[",
"]",
"try",
":",
"field_type",
"=",
"connection",
".",
"introspection",
".",
"get_field_type",
"(",
"row",
"[",
"1",
"]",
",",
"row",
")",
"except",
"KeyError",
":",
"field_type",
"=",
"'TextField'",
"field_notes",
".",
"append",
"(",
"'This field type is a guess.'",
")",
"# This is a hook for DATA_TYPES_REVERSE to return a tuple of",
"# (field_type, field_params_dict).",
"if",
"type",
"(",
"field_type",
")",
"is",
"tuple",
":",
"field_type",
",",
"new_params",
"=",
"field_type",
"field_params",
".",
"update",
"(",
"new_params",
")",
"# Add max_length for all CharFields.",
"if",
"field_type",
"==",
"'CharField'",
"and",
"row",
"[",
"3",
"]",
":",
"field_params",
"[",
"'max_length'",
"]",
"=",
"row",
"[",
"3",
"]",
"if",
"field_type",
"==",
"'DecimalField'",
":",
"field_params",
"[",
"'max_digits'",
"]",
"=",
"row",
"[",
"4",
"]",
"field_params",
"[",
"'decimal_places'",
"]",
"=",
"row",
"[",
"5",
"]",
"return",
"field_type",
",",
"field_params",
",",
"field_notes"
] |
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
|
[
"Given",
"the",
"database",
"connection",
"the",
"table",
"name",
"and",
"the",
"cursor",
"row",
"description",
"this",
"routine",
"will",
"return",
"the",
"given",
"field",
"type",
"name",
"as",
"well",
"as",
"any",
"additional",
"keyword",
"parameters",
"and",
"notes",
"for",
"the",
"field",
"."
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/management/commands/modeldb.py#L185-L214
|
240,532
|
firstprayer/monsql
|
monsql/__init__.py
|
MonSQL
|
def MonSQL(host=None, port=None, username=None, password=None, dbname=None, dbpath=None, dbtype=None):
"""
Initialize and return a Database instance
"""
if dbtype is None:
raise MonSQLException('Database type must be specified')
if dbtype == DB_TYPES.MYSQL:
return MySQLDatabase(host, port, username, password, dbname)
elif dbtype == DB_TYPES.SQLITE3:
return SQLite3Database(dbpath)
elif dbtype == DB_TYPES.POSTGRESQL:
return PostgreSQLDatabase(host, port, username, password, dbname)
else:
raise MonSQLException('Database type %s not supported' % dbtype)
|
python
|
def MonSQL(host=None, port=None, username=None, password=None, dbname=None, dbpath=None, dbtype=None):
"""
Initialize and return a Database instance
"""
if dbtype is None:
raise MonSQLException('Database type must be specified')
if dbtype == DB_TYPES.MYSQL:
return MySQLDatabase(host, port, username, password, dbname)
elif dbtype == DB_TYPES.SQLITE3:
return SQLite3Database(dbpath)
elif dbtype == DB_TYPES.POSTGRESQL:
return PostgreSQLDatabase(host, port, username, password, dbname)
else:
raise MonSQLException('Database type %s not supported' % dbtype)
|
[
"def",
"MonSQL",
"(",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"dbname",
"=",
"None",
",",
"dbpath",
"=",
"None",
",",
"dbtype",
"=",
"None",
")",
":",
"if",
"dbtype",
"is",
"None",
":",
"raise",
"MonSQLException",
"(",
"'Database type must be specified'",
")",
"if",
"dbtype",
"==",
"DB_TYPES",
".",
"MYSQL",
":",
"return",
"MySQLDatabase",
"(",
"host",
",",
"port",
",",
"username",
",",
"password",
",",
"dbname",
")",
"elif",
"dbtype",
"==",
"DB_TYPES",
".",
"SQLITE3",
":",
"return",
"SQLite3Database",
"(",
"dbpath",
")",
"elif",
"dbtype",
"==",
"DB_TYPES",
".",
"POSTGRESQL",
":",
"return",
"PostgreSQLDatabase",
"(",
"host",
",",
"port",
",",
"username",
",",
"password",
",",
"dbname",
")",
"else",
":",
"raise",
"MonSQLException",
"(",
"'Database type %s not supported'",
"%",
"dbtype",
")"
] |
Initialize and return a Database instance
|
[
"Initialize",
"and",
"return",
"a",
"Database",
"instance"
] |
6285c15b574c8664046eae2edfeb548c7b173efd
|
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/__init__.py#L26-L40
|
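A factory-usage sketch; the credentials and paths are placeholders, and DB_TYPES is assumed to be importable from the same package since the code above references it:

from monsql import MonSQL, DB_TYPES

sqlite_db = MonSQL(dbtype=DB_TYPES.SQLITE3, dbpath='/tmp/demo.db')
mysql_db = MonSQL(host='127.0.0.1', port=3306, username='root',
                  password='secret', dbname='demo', dbtype=DB_TYPES.MYSQL)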
240,533
|
damienjones/sculpt-model-tools
|
sculpt/model_tools/mixins.py
|
PasswordMixin.check_password
|
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=[self.PASSWORD_FIELD])
return check_password(raw_password, getattr(self, self.PASSWORD_FIELD), setter)
|
python
|
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=[self.PASSWORD_FIELD])
return check_password(raw_password, getattr(self, self.PASSWORD_FIELD), setter)
|
[
"def",
"check_password",
"(",
"self",
",",
"raw_password",
")",
":",
"def",
"setter",
"(",
"raw_password",
")",
":",
"self",
".",
"set_password",
"(",
"raw_password",
")",
"self",
".",
"save",
"(",
"update_fields",
"=",
"[",
"self",
".",
"PASSWORD_FIELD",
"]",
")",
"return",
"check_password",
"(",
"raw_password",
",",
"getattr",
"(",
"self",
",",
"self",
".",
"PASSWORD_FIELD",
")",
",",
"setter",
")"
] |
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
|
[
"Returns",
"a",
"boolean",
"of",
"whether",
"the",
"raw_password",
"was",
"correct",
".",
"Handles",
"hashing",
"formats",
"behind",
"the",
"scenes",
"."
] |
28d5a7d92bd73bcb1016b30736b6b07070e4cc98
|
https://github.com/damienjones/sculpt-model-tools/blob/28d5a7d92bd73bcb1016b30736b6b07070e4cc98/sculpt/model_tools/mixins.py#L280-L288
|
240,534
|
s1s5/django_busybody
|
django_busybody/views.py
|
MultipleFormMixin.get_context_data
|
def get_context_data(self, **kwargs):
"""
Insert the form into the context dict.
"""
for key in self.get_form_class_keys():
kwargs['{}_form'.format(key)] = self.get_form(key)
return super(FormMixin, self).get_context_data(**kwargs)
|
python
|
def get_context_data(self, **kwargs):
"""
Insert the form into the context dict.
"""
for key in self.get_form_class_keys():
kwargs['{}_form'.format(key)] = self.get_form(key)
return super(FormMixin, self).get_context_data(**kwargs)
|
[
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"key",
"in",
"self",
".",
"get_form_class_keys",
"(",
")",
":",
"kwargs",
"[",
"'{}_form'",
".",
"format",
"(",
"key",
")",
"]",
"=",
"self",
".",
"get_form",
"(",
"key",
")",
"return",
"super",
"(",
"FormMixin",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")"
] |
Insert the form into the context dict.
|
[
"Insert",
"the",
"form",
"into",
"the",
"context",
"dict",
"."
] |
5c6fd89824224f1de1be79ccd9a149f025af1b8f
|
https://github.com/s1s5/django_busybody/blob/5c6fd89824224f1de1be79ccd9a149f025af1b8f/django_busybody/views.py#L100-L106
|
240,535
|
s1s5/django_busybody
|
django_busybody/views.py
|
MultipleModelFormMixin.form_valid
|
def form_valid(self, forms):
"""
If the form is valid, save the associated model.
"""
for key, form in forms.items():
setattr(self, '{}_object'.format(key), form.save())
return super(MultipleModelFormMixin, self).form_valid(forms)
|
python
|
def form_valid(self, forms):
"""
If the form is valid, save the associated model.
"""
for key, form in forms.items():
setattr(self, '{}_object'.format(key), form.save())
return super(MultipleModelFormMixin, self).form_valid(forms)
|
[
"def",
"form_valid",
"(",
"self",
",",
"forms",
")",
":",
"for",
"key",
",",
"form",
"in",
"forms",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"'{}_object'",
".",
"format",
"(",
"key",
")",
",",
"form",
".",
"save",
"(",
")",
")",
"return",
"super",
"(",
"MultipleModelFormMixin",
",",
"self",
")",
".",
"form_valid",
"(",
"forms",
")"
] |
If the form is valid, save the associated model.
|
[
"If",
"the",
"form",
"is",
"valid",
"save",
"the",
"associated",
"model",
"."
] |
5c6fd89824224f1de1be79ccd9a149f025af1b8f
|
https://github.com/s1s5/django_busybody/blob/5c6fd89824224f1de1be79ccd9a149f025af1b8f/django_busybody/views.py#L155-L161
|
240,536
|
rsalmaso/django-fluo
|
fluo/admin/sites.py
|
AdminSite.view_on_site
|
def view_on_site(self, request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise Http404(_("Content type %(ct_id)s object has no associated model") % {
'ct_id': content_type_id,
})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") % {
'ct_id': content_type_id,
'obj_id': object_id,
})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") % {
'ct_name': content_type.name,
})
absurl = get_absolute_url()
return HttpResponseRedirect(absurl)
|
python
|
def view_on_site(self, request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise Http404(_("Content type %(ct_id)s object has no associated model") % {
'ct_id': content_type_id,
})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") % {
'ct_id': content_type_id,
'obj_id': object_id,
})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") % {
'ct_name': content_type.name,
})
absurl = get_absolute_url()
return HttpResponseRedirect(absurl)
|
[
"def",
"view_on_site",
"(",
"self",
",",
"request",
",",
"content_type_id",
",",
"object_id",
")",
":",
"# Look up the object, making sure it's got a get_absolute_url() function.",
"try",
":",
"content_type",
"=",
"ContentType",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"content_type_id",
")",
"if",
"not",
"content_type",
".",
"model_class",
"(",
")",
":",
"raise",
"Http404",
"(",
"_",
"(",
"\"Content type %(ct_id)s object has no associated model\"",
")",
"%",
"{",
"'ct_id'",
":",
"content_type_id",
",",
"}",
")",
"obj",
"=",
"content_type",
".",
"get_object_for_this_type",
"(",
"pk",
"=",
"object_id",
")",
"except",
"(",
"ObjectDoesNotExist",
",",
"ValueError",
")",
":",
"raise",
"Http404",
"(",
"_",
"(",
"\"Content type %(ct_id)s object %(obj_id)s doesn't exist\"",
")",
"%",
"{",
"'ct_id'",
":",
"content_type_id",
",",
"'obj_id'",
":",
"object_id",
",",
"}",
")",
"try",
":",
"get_absolute_url",
"=",
"obj",
".",
"get_absolute_url",
"except",
"AttributeError",
":",
"raise",
"Http404",
"(",
"_",
"(",
"\"%(ct_name)s objects don't have a get_absolute_url() method\"",
")",
"%",
"{",
"'ct_name'",
":",
"content_type",
".",
"name",
",",
"}",
")",
"absurl",
"=",
"get_absolute_url",
"(",
")",
"return",
"HttpResponseRedirect",
"(",
"absurl",
")"
] |
Redirect to an object's page based on a content-type ID and an object ID.
|
[
"Redirect",
"to",
"an",
"object",
"s",
"page",
"based",
"on",
"a",
"content",
"-",
"type",
"ID",
"and",
"an",
"object",
"ID",
"."
] |
1321c1e7d6a912108f79be02a9e7f2108c57f89f
|
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/admin/sites.py#L49-L75
|
240,537
|
uw-it-aca/uw-restclients-graderoster
|
uw_sws_graderoster/__init__.py
|
get_graderoster
|
def get_graderoster(section, instructor, requestor):
"""
Returns a restclients.GradeRoster for the passed Section model and
instructor Person.
"""
label = GradeRoster(section=section,
instructor=instructor).graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Accept": "text/xhtml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
response = SWS_GradeRoster_DAO().getURL(url, headers)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=section,
instructor=instructor)
|
python
|
def get_graderoster(section, instructor, requestor):
"""
Returns a restclients.GradeRoster for the passed Section model and
instructor Person.
"""
label = GradeRoster(section=section,
instructor=instructor).graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Accept": "text/xhtml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
response = SWS_GradeRoster_DAO().getURL(url, headers)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=section,
instructor=instructor)
|
[
"def",
"get_graderoster",
"(",
"section",
",",
"instructor",
",",
"requestor",
")",
":",
"label",
"=",
"GradeRoster",
"(",
"section",
"=",
"section",
",",
"instructor",
"=",
"instructor",
")",
".",
"graderoster_label",
"(",
")",
"url",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"graderoster_url",
",",
"encode_section_label",
"(",
"label",
")",
")",
"headers",
"=",
"{",
"\"Accept\"",
":",
"\"text/xhtml\"",
",",
"\"Connection\"",
":",
"\"keep-alive\"",
",",
"\"X-UW-Act-as\"",
":",
"requestor",
".",
"uwnetid",
"}",
"response",
"=",
"SWS_GradeRoster_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"data",
")",
"msg",
"=",
"root",
".",
"find",
"(",
"\".//*[@class='status_description']\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"msg",
")",
"return",
"GradeRoster",
"(",
"data",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"data",
".",
"strip",
"(",
")",
")",
",",
"section",
"=",
"section",
",",
"instructor",
"=",
"instructor",
")"
] |
Returns a restclients.GradeRoster for the passed Section model and
instructor Person.
|
[
"Returns",
"a",
"restclients",
".",
"GradeRoster",
"for",
"the",
"passed",
"Section",
"model",
"and",
"instructor",
"Person",
"."
] |
1e41553eb7363765af60e87223ca9d22cf6c9187
|
https://github.com/uw-it-aca/uw-restclients-graderoster/blob/1e41553eb7363765af60e87223ca9d22cf6c9187/uw_sws_graderoster/__init__.py#L10-L31
|
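A hedged call sketch; the uw_sws/uw_pws helper names used to build the section and person objects are assumptions, not confirmed by this module:

from uw_sws.section import get_section_by_label  # assumed helper
from uw_pws import PWS                           # assumed client
from uw_sws_graderoster import get_graderoster

section = get_section_by_label('2013,spring,TRAIN,101/A')  # hypothetical section label
instructor = PWS().get_person_by_netid('javerage')         # hypothetical netid
roster = get_graderoster(section, instructor, requestor=instructor)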
240,538
|
uw-it-aca/uw-restclients-graderoster
|
uw_sws_graderoster/__init__.py
|
update_graderoster
|
def update_graderoster(graderoster, requestor):
"""
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
"""
label = graderoster.graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Content-Type": "application/xhtml+xml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
body = graderoster.xhtml()
response = SWS_GradeRoster_DAO().putURL(url, headers, body)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=graderoster.section,
instructor=graderoster.instructor)
|
python
|
def update_graderoster(graderoster, requestor):
"""
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
"""
label = graderoster.graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Content-Type": "application/xhtml+xml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
body = graderoster.xhtml()
response = SWS_GradeRoster_DAO().putURL(url, headers, body)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=graderoster.section,
instructor=graderoster.instructor)
|
[
"def",
"update_graderoster",
"(",
"graderoster",
",",
"requestor",
")",
":",
"label",
"=",
"graderoster",
".",
"graderoster_label",
"(",
")",
"url",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"graderoster_url",
",",
"encode_section_label",
"(",
"label",
")",
")",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/xhtml+xml\"",
",",
"\"Connection\"",
":",
"\"keep-alive\"",
",",
"\"X-UW-Act-as\"",
":",
"requestor",
".",
"uwnetid",
"}",
"body",
"=",
"graderoster",
".",
"xhtml",
"(",
")",
"response",
"=",
"SWS_GradeRoster_DAO",
"(",
")",
".",
"putURL",
"(",
"url",
",",
"headers",
",",
"body",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"data",
")",
"msg",
"=",
"root",
".",
"find",
"(",
"\".//*[@class='status_description']\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"msg",
")",
"return",
"GradeRoster",
"(",
"data",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"data",
".",
"strip",
"(",
")",
")",
",",
"section",
"=",
"graderoster",
".",
"section",
",",
"instructor",
"=",
"graderoster",
".",
"instructor",
")"
] |
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
|
[
"Updates",
"the",
"graderoster",
"resource",
"for",
"the",
"passed",
"restclients",
".",
"GradeRoster",
"model",
".",
"A",
"new",
"restclients",
".",
"GradeRoster",
"is",
"returned",
"representing",
"the",
"document",
"returned",
"from",
"the",
"update",
"request",
"."
] |
1e41553eb7363765af60e87223ca9d22cf6c9187
|
https://github.com/uw-it-aca/uw-restclients-graderoster/blob/1e41553eb7363765af60e87223ca9d22cf6c9187/uw_sws_graderoster/__init__.py#L34-L56
|
240,539
|
zenotech/SysScribe
|
sysscribe/__init__.py
|
netdevs
|
def netdevs():
''' RX and TX totals, in MiB, for each of the network devices '''
with open('/proc/net/dev') as f:
net_dump = f.readlines()
device_data={}
data = namedtuple('data',['rx','tx'])
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0),
float(line[1].split()[8])/(1024.0*1024.0))
return device_data
|
python
|
def netdevs():
''' RX and TX bytes for each of the network devices '''
with open('/proc/net/dev') as f:
net_dump = f.readlines()
device_data={}
data = namedtuple('data',['rx','tx'])
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0),
float(line[1].split()[8])/(1024.0*1024.0))
return device_data
|
[
"def",
"netdevs",
"(",
")",
":",
"with",
"open",
"(",
"'/proc/net/dev'",
")",
"as",
"f",
":",
"net_dump",
"=",
"f",
".",
"readlines",
"(",
")",
"device_data",
"=",
"{",
"}",
"data",
"=",
"namedtuple",
"(",
"'data'",
",",
"[",
"'rx'",
",",
"'tx'",
"]",
")",
"for",
"line",
"in",
"net_dump",
"[",
"2",
":",
"]",
":",
"line",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"if",
"line",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"!=",
"'lo'",
":",
"device_data",
"[",
"line",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"]",
"=",
"data",
"(",
"float",
"(",
"line",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"/",
"(",
"1024.0",
"*",
"1024.0",
")",
",",
"float",
"(",
"line",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"8",
"]",
")",
"/",
"(",
"1024.0",
"*",
"1024.0",
")",
")",
"return",
"device_data"
] |
RX and TX bytes for each of the network devices
|
[
"RX",
"and",
"TX",
"bytes",
"for",
"each",
"of",
"the",
"network",
"devices"
] |
8cabfc9718e7ccc6d217fbcfc158dd255b28c9b1
|
https://github.com/zenotech/SysScribe/blob/8cabfc9718e7ccc6d217fbcfc158dd255b28c9b1/sysscribe/__init__.py#L58-L72
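A quick usage sketch for the netdevs() record above (Linux only, since it reads /proc/net/dev; assumes the function and its namedtuple import are in scope):

for iface, counters in netdevs().items():
    # counters.rx / counters.tx are already converted to MiB
    print("%s: rx=%.2f MiB, tx=%.2f MiB" % (iface, counters.rx, counters.tx))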
|
240,540
|
almcc/cinder-data
|
robot/libs/CinderDataLibrary.py
|
CinderDataLibrary.get_model
|
def get_model(self, model, model_id):
"""Get a single model from the server.
Args:
model (string): The class as a string.
model_id (string): The integer ID as a string.
Returns:
:class:`cinder_data.model.CinderModel`: An instance of the model.
"""
return self._store.find_record(self._get_model_class(model), int(model_id))
|
python
|
def get_model(self, model, model_id):
"""Get a single model from the server.
Args:
model (string): The class as a string.
model_id (string): The integer ID as a string.
Returns:
:class:`cinder_data.model.CinderModel`: An instance of the model.
"""
return self._store.find_record(self._get_model_class(model), int(model_id))
|
[
"def",
"get_model",
"(",
"self",
",",
"model",
",",
"model_id",
")",
":",
"return",
"self",
".",
"_store",
".",
"find_record",
"(",
"self",
".",
"_get_model_class",
"(",
"model",
")",
",",
"int",
"(",
"model_id",
")",
")"
] |
Get a single model from the server.
Args:
model (string): The class as a string.
model_id (string): The integer ID as a string.
Returns:
:class:`cinder_data.model.CinderModel`: An instance of the model.
|
[
"Get",
"a",
"single",
"model",
"from",
"the",
"server",
"."
] |
4159a5186c4b4fc32354749892e86130530f6ec5
|
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/robot/libs/CinderDataLibrary.py#L38-L48
|
240,541
|
almcc/cinder-data
|
robot/libs/CinderDataLibrary.py
|
CinderDataLibrary.get_models
|
def get_models(self, model, page=None):
"""Get all the models from the server.
Args:
model (string): The class as a string.
page (string, optional): The page number as a string.
Returns:
list: A list of instances of the requested model.
"""
if page is not None:
return self._store.find_all(self._get_model_class(model), params={'page': int(page)})
else:
return self._store.find_all(self._get_model_class(model))
|
python
|
def get_models(self, model, page=None):
"""Get all the models from the server.
Args:
model (string): The class as a string.
page (string, optional): The page number as a string.
Returns:
list: A list of instances of the requested model.
"""
if page is not None:
return self._store.find_all(self._get_model_class(model), params={'page': int(page)})
else:
return self._store.find_all(self._get_model_class(model))
|
[
"def",
"get_models",
"(",
"self",
",",
"model",
",",
"page",
"=",
"None",
")",
":",
"if",
"page",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_store",
".",
"find_all",
"(",
"self",
".",
"_get_model_class",
"(",
"model",
")",
",",
"params",
"=",
"{",
"'page'",
":",
"int",
"(",
"page",
")",
"}",
")",
"else",
":",
"return",
"self",
".",
"_store",
".",
"find_all",
"(",
"self",
".",
"_get_model_class",
"(",
"model",
")",
")"
] |
Get all the models from the server.
Args:
model (string): The class as a string.
page (string, optional): The page number as a string.
Returns:
list: A list of instances of the requested model.
|
[
"Get",
"all",
"the",
"models",
"from",
"the",
"server",
"."
] |
4159a5186c4b4fc32354749892e86130530f6ec5
|
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/robot/libs/CinderDataLibrary.py#L50-L63
|
240,542
|
dicaso/leopard
|
leopard/utils.py
|
makeFigFromFile
|
def makeFigFromFile(filename,*args,**kwargs):
"""
Renders an image in a matplotlib figure, so it can be added to reports
args and kwargs are passed to plt.subplots
"""
import matplotlib.pyplot as plt
img = plt.imread(filename)
fig,ax = plt.subplots(*args,**kwargs)
ax.axis('off')
ax.imshow(img)
return fig
|
python
|
def makeFigFromFile(filename,*args,**kwargs):
"""
Renders an image in a matplotlib figure, so it can be added to reports
args and kwargs are passed to plt.subplots
"""
import matplotlib.pyplot as plt
img = plt.imread(filename)
fig,ax = plt.subplots(*args,**kwargs)
ax.axis('off')
ax.imshow(img)
return fig
|
[
"def",
"makeFigFromFile",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"img",
"=",
"plt",
".",
"imread",
"(",
"filename",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"axis",
"(",
"'off'",
")",
"ax",
".",
"imshow",
"(",
"img",
")",
"return",
"fig"
] |
Renders an image in a matplotlib figure, so it can be added to reports
args and kwargs are passed to plt.subplots
|
[
"Renders",
"an",
"image",
"in",
"a",
"matplotlib",
"figure",
"so",
"it",
"can",
"be",
"added",
"to",
"reports",
"args",
"and",
"kwargs",
"are",
"passed",
"to",
"plt",
".",
"subplots"
] |
ee9f45251aaacd1e453b135b419f4f0b50fb036e
|
https://github.com/dicaso/leopard/blob/ee9f45251aaacd1e453b135b419f4f0b50fb036e/leopard/utils.py#L8-L18
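Possible usage of makeFigFromFile() above; 'logo.png' is a hypothetical image file, and extra arguments are forwarded to plt.subplots:

fig = makeFigFromFile('logo.png', figsize=(4, 3))
fig.savefig('report_image.png')  # e.g. persist the figure for a report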
|
240,543
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.list_exes
|
def list_exes(self):
"""List the installed executables by this project."""
return [path.join(self.env_bin, f)
for f
in os.listdir(self.env_bin)]
|
python
|
def list_exes(self):
"""List the installed executables by this project."""
return [path.join(self.env_bin, f)
for f
in os.listdir(self.env_bin)]
|
[
"def",
"list_exes",
"(",
"self",
")",
":",
"return",
"[",
"path",
".",
"join",
"(",
"self",
".",
"env_bin",
",",
"f",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"env_bin",
")",
"]"
] |
List the installed executables by this project.
|
[
"List",
"the",
"installed",
"executables",
"by",
"this",
"project",
"."
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L32-L36
|
240,544
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.create_env
|
def create_env(self):
"""Create a virtual environment."""
virtualenv(self.env, _err=sys.stderr)
os.mkdir(self.env_bin)
|
python
|
def create_env(self):
"""Create a virtual environment."""
virtualenv(self.env, _err=sys.stderr)
os.mkdir(self.env_bin)
|
[
"def",
"create_env",
"(",
"self",
")",
":",
"virtualenv",
"(",
"self",
".",
"env",
",",
"_err",
"=",
"sys",
".",
"stderr",
")",
"os",
".",
"mkdir",
"(",
"self",
".",
"env_bin",
")"
] |
Create a virtual environment.
|
[
"Create",
"a",
"virtual",
"environment",
"."
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L38-L41
|
240,545
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.install_program
|
def install_program(self, extra_args):
"""Install the app to the virtualenv"""
pip = Command(path.join(self.env, 'bin', 'pip'))
args = ['install', self.raw_name,
'--install-option', '--install-scripts={}'
.format(self.env_bin)] + list(extra_args)
print_pretty("<BOLD>pip {}<END>\n".format(' '.join(args)))
pip(args, _out=sys.stdout, _err=sys.stderr)
print('')
|
python
|
def install_program(self, extra_args):
"""Install the app to the virtualenv"""
pip = Command(path.join(self.env, 'bin', 'pip'))
args = ['install', self.raw_name,
'--install-option', '--install-scripts={}'
.format(self.env_bin)] + list(extra_args)
print_pretty("<BOLD>pip {}<END>\n".format(' '.join(args)))
pip(args, _out=sys.stdout, _err=sys.stderr)
print('')
|
[
"def",
"install_program",
"(",
"self",
",",
"extra_args",
")",
":",
"pip",
"=",
"Command",
"(",
"path",
".",
"join",
"(",
"self",
".",
"env",
",",
"'bin'",
",",
"'pip'",
")",
")",
"args",
"=",
"[",
"'install'",
",",
"self",
".",
"raw_name",
",",
"'--install-option'",
",",
"'--install-scripts={}'",
".",
"format",
"(",
"self",
".",
"env_bin",
")",
"]",
"+",
"list",
"(",
"extra_args",
")",
"print_pretty",
"(",
"\"<BOLD>pip {}<END>\\n\"",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"args",
")",
")",
")",
"pip",
"(",
"args",
",",
"_out",
"=",
"sys",
".",
"stdout",
",",
"_err",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"''",
")"
] |
Install the app to the virtualenv
|
[
"Install",
"the",
"app",
"to",
"the",
"virtualenv"
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L47-L55
|
240,546
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.create_links
|
def create_links(self):
"""Create links to installed scripts in the virtualenv's
bin directory to our bin directory.
"""
for link in self.list_exes():
print_pretty("<FG_BLUE>Creating link for {}...<END>".format(link))
os.symlink(link, path.join(ENV_BIN, path.basename(link)))
|
python
|
def create_links(self):
"""Create links to installed scripts in the virtualenv's
bin directory to our bin directory.
"""
for link in self.list_exes():
print_pretty("<FG_BLUE>Creating link for {}...<END>".format(link))
os.symlink(link, path.join(ENV_BIN, path.basename(link)))
|
[
"def",
"create_links",
"(",
"self",
")",
":",
"for",
"link",
"in",
"self",
".",
"list_exes",
"(",
")",
":",
"print_pretty",
"(",
"\"<FG_BLUE>Creating link for {}...<END>\"",
".",
"format",
"(",
"link",
")",
")",
"os",
".",
"symlink",
"(",
"link",
",",
"path",
".",
"join",
"(",
"ENV_BIN",
",",
"path",
".",
"basename",
"(",
"link",
")",
")",
")"
] |
Create links to installed scripts in the virtualenv's
bin directory to our bin directory.
|
[
"Create",
"links",
"to",
"installed",
"scripts",
"in",
"the",
"virtualenv",
"s",
"bin",
"directory",
"to",
"our",
"bin",
"directory",
"."
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L57-L64
|
240,547
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.remove_links
|
def remove_links(self):
"""Remove links from our bin."""
for link in self.list_exes():
link = path.join(ENV_BIN, path.basename(link))
print_pretty("<FG_BLUE>Removing link {}...<END>".format(link))
os.remove(link)
|
python
|
def remove_links(self):
"""Remove links from our bin."""
for link in self.list_exes():
link = path.join(ENV_BIN, path.basename(link))
print_pretty("<FG_BLUE>Removing link {}...<END>".format(link))
os.remove(link)
|
[
"def",
"remove_links",
"(",
"self",
")",
":",
"for",
"link",
"in",
"self",
".",
"list_exes",
"(",
")",
":",
"link",
"=",
"path",
".",
"join",
"(",
"ENV_BIN",
",",
"path",
".",
"basename",
"(",
"link",
")",
")",
"print_pretty",
"(",
"\"<FG_BLUE>Removing link {}...<END>\"",
".",
"format",
"(",
"link",
")",
")",
"os",
".",
"remove",
"(",
"link",
")"
] |
Remove links from our bin.
|
[
"Remove",
"links",
"from",
"our",
"bin",
"."
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L66-L71
|
240,548
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.uninstall
|
def uninstall(self):
"""Uninstall the environment and links."""
if path.isdir(self.env_bin):
self.remove_links()
if path.isdir(self.env):
print_pretty("<FG_BLUE>Removing env {}...<END>".format(self.env))
shutil.rmtree(self.env)
|
python
|
def uninstall(self):
"""Uninstall the environment and links."""
if path.isdir(self.env_bin):
self.remove_links()
if path.isdir(self.env):
print_pretty("<FG_BLUE>Removing env {}...<END>".format(self.env))
shutil.rmtree(self.env)
|
[
"def",
"uninstall",
"(",
"self",
")",
":",
"if",
"path",
".",
"isdir",
"(",
"self",
".",
"env_bin",
")",
":",
"self",
".",
"remove_links",
"(",
")",
"if",
"path",
".",
"isdir",
"(",
"self",
".",
"env",
")",
":",
"print_pretty",
"(",
"\"<FG_BLUE>Removing env {}...<END>\"",
".",
"format",
"(",
"self",
".",
"env",
")",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"env",
")"
] |
Uninstall the environment and links.
|
[
"Uninstall",
"the",
"environment",
"and",
"links",
"."
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L73-L79
|
240,549
|
Raynes/quarantine
|
quarantine/cdc.py
|
CDC.install
|
def install(self, pip_args=None):
"""Install the program and put links in place."""
if path.isdir(self.env):
print_pretty("<FG_RED>This seems to already be installed.<END>")
else:
print_pretty("<FG_BLUE>Creating environment {}...<END>\n".format(self.env))
self.create_env()
self.install_program(pip_args)
self.create_links()
|
python
|
def install(self, pip_args=None):
"""Install the program and put links in place."""
if path.isdir(self.env):
print_pretty("<FG_RED>This seems to already be installed.<END>")
else:
print_pretty("<FG_BLUE>Creating environment {}...<END>\n".format(self.env))
self.create_env()
self.install_program(pip_args)
self.create_links()
|
[
"def",
"install",
"(",
"self",
",",
"pip_args",
"=",
"None",
")",
":",
"if",
"path",
".",
"isdir",
"(",
"self",
".",
"env",
")",
":",
"print_pretty",
"(",
"\"<FG_RED>This seems to already be installed.<END>\"",
")",
"else",
":",
"print_pretty",
"(",
"\"<FG_BLUE>Creating environment {}...<END>\\n\"",
".",
"format",
"(",
"self",
".",
"env",
")",
")",
"self",
".",
"create_env",
"(",
")",
"self",
".",
"install_program",
"(",
"pip_args",
")",
"self",
".",
"create_links",
"(",
")"
] |
Install the program and put links in place.
|
[
"Install",
"the",
"program",
"and",
"put",
"links",
"in",
"place",
"."
] |
742a318fcb7d34dbdf4fac388daff03a36872d8b
|
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L81-L89
|
240,550
|
dev-pipeline/dev-pipeline-git
|
lib/devpipeline_git/git.py
|
_make_git
|
def _make_git(config_info):
"""This function initializes and Git SCM tool object."""
git_args = {}
def _add_value(value, key):
args_key, args_value = _GIT_ARG_FNS[key](value)
git_args[args_key] = args_value
devpipeline_core.toolsupport.args_builder("git", config_info, _GIT_ARGS, _add_value)
if git_args.get("uri"):
return devpipeline_scm.make_simple_scm(Git(git_args), config_info)
else:
raise Exception("No git uri ({})".format(config_info.config.name))
|
python
|
def _make_git(config_info):
"""This function initializes and Git SCM tool object."""
git_args = {}
def _add_value(value, key):
args_key, args_value = _GIT_ARG_FNS[key](value)
git_args[args_key] = args_value
devpipeline_core.toolsupport.args_builder("git", config_info, _GIT_ARGS, _add_value)
if git_args.get("uri"):
return devpipeline_scm.make_simple_scm(Git(git_args), config_info)
else:
raise Exception("No git uri ({})".format(config_info.config.name))
|
[
"def",
"_make_git",
"(",
"config_info",
")",
":",
"git_args",
"=",
"{",
"}",
"def",
"_add_value",
"(",
"value",
",",
"key",
")",
":",
"args_key",
",",
"args_value",
"=",
"_GIT_ARG_FNS",
"[",
"key",
"]",
"(",
"value",
")",
"git_args",
"[",
"args_key",
"]",
"=",
"args_value",
"devpipeline_core",
".",
"toolsupport",
".",
"args_builder",
"(",
"\"git\"",
",",
"config_info",
",",
"_GIT_ARGS",
",",
"_add_value",
")",
"if",
"git_args",
".",
"get",
"(",
"\"uri\"",
")",
":",
"return",
"devpipeline_scm",
".",
"make_simple_scm",
"(",
"Git",
"(",
"git_args",
")",
",",
"config_info",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"No git uri ({})\"",
".",
"format",
"(",
"config_info",
".",
"config",
".",
"name",
")",
")"
] |
This function initializes a Git SCM tool object.
|
[
"This",
"function",
"initializes",
"and",
"Git",
"SCM",
"tool",
"object",
"."
] |
b604f1f89402502b8ad858f4f834baa9467ef380
|
https://github.com/dev-pipeline/dev-pipeline-git/blob/b604f1f89402502b8ad858f4f834baa9467ef380/lib/devpipeline_git/git.py#L143-L155
|
240,551
|
dev-pipeline/dev-pipeline-git
|
lib/devpipeline_git/git.py
|
Git.checkout
|
def checkout(self, repo_dir, shared_dir, **kwargs):
"""This function checks out code from a Git SCM server."""
del kwargs
args = []
for checkout_fn in _CHECKOUT_ARG_BUILDERS:
args.extend(checkout_fn(shared_dir, repo_dir, self._args))
return args
|
python
|
def checkout(self, repo_dir, shared_dir, **kwargs):
"""This function checks out code from a Git SCM server."""
del kwargs
args = []
for checkout_fn in _CHECKOUT_ARG_BUILDERS:
args.extend(checkout_fn(shared_dir, repo_dir, self._args))
return args
|
[
"def",
"checkout",
"(",
"self",
",",
"repo_dir",
",",
"shared_dir",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"kwargs",
"args",
"=",
"[",
"]",
"for",
"checkout_fn",
"in",
"_CHECKOUT_ARG_BUILDERS",
":",
"args",
".",
"extend",
"(",
"checkout_fn",
"(",
"shared_dir",
",",
"repo_dir",
",",
"self",
".",
"_args",
")",
")",
"return",
"args"
] |
This function checks out code from a Git SCM server.
|
[
"This",
"function",
"checks",
"out",
"code",
"from",
"a",
"Git",
"SCM",
"server",
"."
] |
b604f1f89402502b8ad858f4f834baa9467ef380
|
https://github.com/dev-pipeline/dev-pipeline-git/blob/b604f1f89402502b8ad858f4f834baa9467ef380/lib/devpipeline_git/git.py#L118-L124
|
240,552
|
dev-pipeline/dev-pipeline-git
|
lib/devpipeline_git/git.py
|
Git.update
|
def update(self, repo_dir, **kwargs):
"""This function updates an existing checkout of source code."""
del kwargs
rev = self._args.get("revision")
if rev:
return [{"args": ["git", "checkout", rev], "cwd": repo_dir}] + _ff_command(
rev, repo_dir
)
return None
|
python
|
def update(self, repo_dir, **kwargs):
"""This function updates an existing checkout of source code."""
del kwargs
rev = self._args.get("revision")
if rev:
return [{"args": ["git", "checkout", rev], "cwd": repo_dir}] + _ff_command(
rev, repo_dir
)
return None
|
[
"def",
"update",
"(",
"self",
",",
"repo_dir",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"kwargs",
"rev",
"=",
"self",
".",
"_args",
".",
"get",
"(",
"\"revision\"",
")",
"if",
"rev",
":",
"return",
"[",
"{",
"\"args\"",
":",
"[",
"\"git\"",
",",
"\"checkout\"",
",",
"rev",
"]",
",",
"\"cwd\"",
":",
"repo_dir",
"}",
"]",
"+",
"_ff_command",
"(",
"rev",
",",
"repo_dir",
")",
"return",
"None"
] |
This function updates an existing checkout of source code.
|
[
"This",
"function",
"updates",
"an",
"existing",
"checkout",
"of",
"source",
"code",
"."
] |
b604f1f89402502b8ad858f4f834baa9467ef380
|
https://github.com/dev-pipeline/dev-pipeline-git/blob/b604f1f89402502b8ad858f4f834baa9467ef380/lib/devpipeline_git/git.py#L126-L134
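The update() method above returns shell-command descriptors rather than running git itself. A sketch of executing such a list (the _ff_command helper is not shown in this record, so the dicts below are written out by hand):

import subprocess

commands = [
    {"args": ["git", "checkout", "v1.2.0"], "cwd": "/tmp/checkout"},
    # _ff_command() presumably appends a fast-forward step here
]
for cmd in commands:
    subprocess.check_call(cmd["args"], cwd=cmd["cwd"])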
|
240,553
|
CitrineInformatics/dftparse
|
dftparse/wien2k/absorp_parser.py
|
_parse_absorption
|
def _parse_absorption(line, lines):
"""Parse Energy, Re sigma xx, Re sigma zz, absorp xx, absorp zz"""
split_line = line.split()
energy = float(split_line[0])
re_sigma_xx = float(split_line[1])
re_sigma_zz = float(split_line[2])
absorp_xx = float(split_line[3])
absorp_zz = float(split_line[4])
return {"energy": energy, "re_sigma_xx": re_sigma_xx, "re_sigma_zz": re_sigma_zz,
"absorp_xx": absorp_xx, "absorp_zz": absorp_zz}
|
python
|
def _parse_absorption(line, lines):
"""Parse Energy, Re sigma xx, Re sigma zz, absorp xx, absorp zz"""
split_line = line.split()
energy = float(split_line[0])
re_sigma_xx = float(split_line[1])
re_sigma_zz = float(split_line[2])
absorp_xx = float(split_line[3])
absorp_zz = float(split_line[4])
return {"energy": energy, "re_sigma_xx": re_sigma_xx, "re_sigma_zz": re_sigma_zz,
"absorp_xx": absorp_xx, "absorp_zz": absorp_zz}
|
[
"def",
"_parse_absorption",
"(",
"line",
",",
"lines",
")",
":",
"split_line",
"=",
"line",
".",
"split",
"(",
")",
"energy",
"=",
"float",
"(",
"split_line",
"[",
"0",
"]",
")",
"re_sigma_xx",
"=",
"float",
"(",
"split_line",
"[",
"1",
"]",
")",
"re_sigma_zz",
"=",
"float",
"(",
"split_line",
"[",
"2",
"]",
")",
"absorp_xx",
"=",
"float",
"(",
"split_line",
"[",
"3",
"]",
")",
"absorp_zz",
"=",
"float",
"(",
"split_line",
"[",
"4",
"]",
")",
"return",
"{",
"\"energy\"",
":",
"energy",
",",
"\"re_sigma_xx\"",
":",
"re_sigma_xx",
",",
"\"re_sigma_zz\"",
":",
"re_sigma_zz",
",",
"\"absorp_xx\"",
":",
"absorp_xx",
",",
"\"absorp_zz\"",
":",
"absorp_zz",
"}"
] |
Parse Energy, Re sigma xx, Re sigma zz, absorp xx, absorp zz
|
[
"Parse",
"Energy",
"Re",
"sigma",
"xx",
"Re",
"sigma",
"zz",
"absorp",
"xx",
"absorp",
"zz"
] |
53a1bf19945cf1c195d6af9beccb3d1b7f4a4c1d
|
https://github.com/CitrineInformatics/dftparse/blob/53a1bf19945cf1c195d6af9beccb3d1b7f4a4c1d/dftparse/wien2k/absorp_parser.py#L4-L16
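Feeding _parse_absorption() a synthetic five-column line (values invented; the lines argument is unused by this rule, so an empty iterator suffices):

row = _parse_absorption("0.13606 0.0021 0.0019 0.0450 0.0410", iter([]))
print(row["energy"], row["absorp_xx"])  # 0.13606 0.045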
|
240,554
|
jalanb/pysyte
|
pysyte/text_streams.py
|
args
|
def args(parsed_args, name=None):
"""Interpret parsed args to streams"""
strings = parsed_args.arg_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
else:
streams = []
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
if getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
elif not streams:
streams = [sys.stdin]
return streams
|
python
|
def args(parsed_args, name=None):
"""Interpret parsed args to streams"""
strings = parsed_args.arg_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
else:
streams = []
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
if getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
elif not streams:
streams = [sys.stdin]
return streams
|
[
"def",
"args",
"(",
"parsed_args",
",",
"name",
"=",
"None",
")",
":",
"strings",
"=",
"parsed_args",
".",
"arg_strings",
"(",
"name",
")",
"files",
"=",
"[",
"s",
"for",
"s",
"in",
"strings",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"s",
")",
"]",
"if",
"files",
":",
"streams",
"=",
"[",
"open",
"(",
"f",
")",
"for",
"f",
"in",
"files",
"]",
"else",
":",
"streams",
"=",
"[",
"]",
"if",
"getattr",
"(",
"parsed_args",
",",
"'paste'",
",",
"not",
"files",
")",
":",
"streams",
".",
"append",
"(",
"clipboard_stream",
"(",
")",
")",
"if",
"getattr",
"(",
"parsed_args",
",",
"'stdin'",
",",
"False",
")",
":",
"streams",
".",
"append",
"(",
"sys",
".",
"stdin",
")",
"elif",
"not",
"streams",
":",
"streams",
"=",
"[",
"sys",
".",
"stdin",
"]",
"return",
"streams"
] |
Interpret parsed args to streams
|
[
"Interpret",
"parsed",
"args",
"to",
"streams"
] |
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/text_streams.py#L23-L37
|
240,555
|
brycepg/mand
|
mand/mand.py
|
main
|
def main(argv=None):
"""Execute each module in the same interpreter.
Args:
argv: Each item of argv will be treated as a separate
module with potential arguments
each item may be a string or a sequence of strings.
If a given argument is a string, then treat string as
shell arguments and split accordingly.
If the given argument is a tuple or list, then assume
that the given arguments are already parsed.
The first item of each argument should be a module or module path
"""
if argv is None:
argv = sys.argv[1:]
args = _get_parser().parse_args(argv)
mand(args.module_seq)
|
python
|
def main(argv=None):
"""Execute each module in the same interpreter.
Args:
argv: Each item of argv will be treated as a separate
module with potential arguments
each item may be a string or a sequence of strings.
If a given argument is a string, then treat string as
shell arguments and split accordingly.
If the given argument is a tuple or list, then assume
that the given arguments are already parsed.
The first item of each argument should be a module or module path
"""
if argv is None:
argv = sys.argv[1:]
args = _get_parser().parse_args(argv)
mand(args.module_seq)
|
[
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"args",
"=",
"_get_parser",
"(",
")",
".",
"parse_args",
"(",
"argv",
")",
"mand",
"(",
"args",
".",
"module_seq",
")"
] |
Execute each module in the same interpreter.
Args:
argv: Each item of argv will be treated as a separate
module with potential arguments
each item may be a string or a sequence of strings.
If a given argument is a string, then treat string as
shell arguments and split accordingly.
If the given argument is a tuple or list, then assume
that the given arguments are already parsed.
The first item of each argument should be a module or module path
|
[
"Execute",
"each",
"module",
"in",
"the",
"same",
"interpreter",
"."
] |
3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c
|
https://github.com/brycepg/mand/blob/3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c/mand/mand.py#L12-L28
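The mand() call above ultimately leans on runpy; a minimal standalone illustration of running a stdlib module as __main__ with substituted arguments:

import runpy
import sys

sys.argv = ["json.tool", "--help"]  # the argv the target module will see
try:
    runpy.run_module("json.tool", run_name="__main__")
except SystemExit:
    pass  # argparse-style mains call sys.exit(); a driver must absorb that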
|
240,556
|
brycepg/mand
|
mand/mand.py
|
call_multiple_modules
|
def call_multiple_modules(module_gen):
"""Call each module
module_gen should be an iterator
"""
for args_seq in module_gen:
module_name_or_path = args_seq[0]
with replace_sys_args(args_seq):
if re.match(VALID_PACKAGE_RE, module_name_or_path):
runpy.run_module(module_name_or_path,
run_name='__main__')
else:
runpy.run_path(module_name_or_path,
run_name='__main__')
|
python
|
def call_multiple_modules(module_gen):
"""Call each module
module_gen should be an iterator
"""
for args_seq in module_gen:
module_name_or_path = args_seq[0]
with replace_sys_args(args_seq):
if re.match(VALID_PACKAGE_RE, module_name_or_path):
runpy.run_module(module_name_or_path,
run_name='__main__')
else:
runpy.run_path(module_name_or_path,
run_name='__main__')
|
[
"def",
"call_multiple_modules",
"(",
"module_gen",
")",
":",
"for",
"args_seq",
"in",
"module_gen",
":",
"module_name_or_path",
"=",
"args_seq",
"[",
"0",
"]",
"with",
"replace_sys_args",
"(",
"args_seq",
")",
":",
"if",
"re",
".",
"match",
"(",
"VALID_PACKAGE_RE",
",",
"module_name_or_path",
")",
":",
"runpy",
".",
"run_module",
"(",
"module_name_or_path",
",",
"run_name",
"=",
"'__main__'",
")",
"else",
":",
"runpy",
".",
"run_path",
"(",
"module_name_or_path",
",",
"run_name",
"=",
"'__main__'",
")"
] |
Call each module
module_gen should be an iterator
|
[
"Call",
"each",
"module"
] |
3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c
|
https://github.com/brycepg/mand/blob/3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c/mand/mand.py#L50-L63
|
240,557
|
brycepg/mand
|
mand/mand.py
|
replace_sys_args
|
def replace_sys_args(new_args):
"""Temporarily replace sys.argv with current arguments
Restores sys.argv upon exit of the context manager.
"""
# Replace sys.argv arguments
# for module import
old_args = sys.argv
sys.argv = new_args
try:
yield
finally:
sys.argv = old_args
|
python
|
def replace_sys_args(new_args):
"""Temporarily replace sys.argv with current arguments
Restores sys.argv upon exit of the context manager.
"""
# Replace sys.argv arguments
# for module import
old_args = sys.argv
sys.argv = new_args
try:
yield
finally:
sys.argv = old_args
|
[
"def",
"replace_sys_args",
"(",
"new_args",
")",
":",
"# Replace sys.argv arguments",
"# for module import",
"old_args",
"=",
"sys",
".",
"argv",
"sys",
".",
"argv",
"=",
"new_args",
"try",
":",
"yield",
"finally",
":",
"sys",
".",
"argv",
"=",
"old_args"
] |
Temporarily replace sys.argv with current arguments
Restores sys.argv upon exit of the context manager.
|
[
"Temporarily",
"replace",
"sys",
".",
"argv",
"with",
"current",
"arguments"
] |
3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c
|
https://github.com/brycepg/mand/blob/3a8f9c1cc1bbe217aaca8c805113285ab02ecb7c/mand/mand.py#L66-L78
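Because replace_sys_args() yields, it is presumably decorated with @contextlib.contextmanager in the source (the decorator is not shown in this record). A self-contained sketch with usage:

import contextlib
import sys

@contextlib.contextmanager
def replace_sys_args(new_args):
    old_args = sys.argv
    sys.argv = new_args
    try:
        yield
    finally:
        sys.argv = old_args

with replace_sys_args(["prog", "--flag"]):
    print(sys.argv)  # ['prog', '--flag']
print(sys.argv[0])   # original argv restored afterwards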
|
240,558
|
edwards-lab/libGWAS
|
libgwas/bed_parser.py
|
Parser.load_bim
|
def load_bim(self, map3=False):
"""Basic marker details loading.
(chr, rsid, gen. dist, pos, allele 1, allele 2)
:param map3: When true, ignore the genetic distance column
:return: None
"""
cols = [0, 1, 3, 4, 5]
if map3:
cols = [0, 1, 2, 3, 4]
logging.info("Loading file: %s" % self.bim_file)
val = sys_call('wc -l %s' % (self.bim_file))[0][0].split()[0]
marker_count = int(val)
self.markers = numpy.zeros((marker_count, 2), dtype=int)
self.rsids = []
self.alleles = []
with open(self.bim_file) as file:
index = 0
for line in file:
if map3:
chr, rsid, pos, al1, al2 = line.strip().split()
else:
chr, rsid, gd, pos, al1, al2 = line.strip().split()
self.markers[index, 0] = int(chr)
self.markers[index, 1] = int(pos)
self.alleles.append([al1, al2])
self.rsids.append(rsid)
index += 1
self.locus_count = self.markers.shape[0]
|
python
|
def load_bim(self, map3=False):
"""Basic marker details loading.
(chr, rsid, gen. dist, pos, allele 1, allele 2)
:param map3: When true, ignore the genetic distance column
:return: None
"""
cols = [0, 1, 3, 4, 5]
if map3:
cols = [0, 1, 2, 3, 4]
logging.info("Loading file: %s" % self.bim_file)
val = sys_call('wc -l %s' % (self.bim_file))[0][0].split()[0]
marker_count = int(val)
self.markers = numpy.zeros((marker_count, 2), dtype=int)
self.rsids = []
self.alleles = []
with open(self.bim_file) as file:
index = 0
for line in file:
if map3:
chr, rsid, pos, al1, al2 = line.strip().split()
else:
chr, rsid, gd, pos, al1, al2 = line.strip().split()
self.markers[index, 0] = int(chr)
self.markers[index, 1] = int(pos)
self.alleles.append([al1, al2])
self.rsids.append(rsid)
index += 1
self.locus_count = self.markers.shape[0]
|
[
"def",
"load_bim",
"(",
"self",
",",
"map3",
"=",
"False",
")",
":",
"cols",
"=",
"[",
"0",
",",
"1",
",",
"3",
",",
"4",
",",
"5",
"]",
"if",
"map3",
":",
"cols",
"=",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
",",
"4",
"]",
"logging",
".",
"info",
"(",
"\"Loading file: %s\"",
"%",
"self",
".",
"bim_file",
")",
"val",
"=",
"sys_call",
"(",
"'wc -l %s'",
"%",
"(",
"self",
".",
"bim_file",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"marker_count",
"=",
"int",
"(",
"val",
")",
"self",
".",
"markers",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"marker_count",
",",
"2",
")",
",",
"dtype",
"=",
"int",
")",
"self",
".",
"rsids",
"=",
"[",
"]",
"self",
".",
"alleles",
"=",
"[",
"]",
"with",
"open",
"(",
"self",
".",
"bim_file",
")",
"as",
"file",
":",
"index",
"=",
"0",
"for",
"line",
"in",
"file",
":",
"if",
"map3",
":",
"chr",
",",
"rsid",
",",
"pos",
",",
"al1",
",",
"al2",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"else",
":",
"chr",
",",
"rsid",
",",
"gd",
",",
"pos",
",",
"al1",
",",
"al2",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"self",
".",
"markers",
"[",
"index",
",",
"0",
"]",
"=",
"int",
"(",
"chr",
")",
"self",
".",
"markers",
"[",
"index",
",",
"1",
"]",
"=",
"int",
"(",
"pos",
")",
"self",
".",
"alleles",
".",
"append",
"(",
"[",
"al1",
",",
"al2",
"]",
")",
"self",
".",
"rsids",
".",
"append",
"(",
"rsid",
")",
"index",
"+=",
"1",
"self",
".",
"locus_count",
"=",
"self",
".",
"markers",
".",
"shape",
"[",
"0",
"]"
] |
Basic marker details loading.
(chr, rsid, gen. dist, pos, allele 1, allele 2)
:param map3: When true, ignore the genetic distance column
:return: None
|
[
"Basic",
"marker",
"details",
"loading",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/bed_parser.py#L162-L192
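One synthetic PLINK .bim line parsed the way the default (non-map3) branch above does; the fields are chromosome, rsid, genetic distance, position, allele 1, allele 2:

line = "1 rs123 0 752566 A G\n"
chrom, rsid, gd, pos, al1, al2 = line.strip().split()
print(int(chrom), rsid, int(pos), [al1, al2])  # 1 rs123 752566 ['A', 'G']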
|
240,559
|
edwards-lab/libGWAS
|
libgwas/bed_parser.py
|
Parser.init_genotype_file
|
def init_genotype_file(self):
"""Resets the bed file and preps it for starting at the start of the \
genotype data
Returns to beginning of file and reads the version so that it points \
to first marker's info
:return: (magic, data_format)
"""
self.genotype_file.seek(0)
buff = self.genotype_file.read(3)
magic, data_format = struct.unpack("<HB", buff)
return magic, data_format
|
python
|
def init_genotype_file(self):
"""Resets the bed file and preps it for starting at the start of the \
genotype data
Returns to beginning of file and reads the version so that it points \
to first marker's info
:return: (magic, data_format)
"""
self.genotype_file.seek(0)
buff = self.genotype_file.read(3)
magic, data_format = struct.unpack("<HB", buff)
return magic, data_format
|
[
"def",
"init_genotype_file",
"(",
"self",
")",
":",
"self",
".",
"genotype_file",
".",
"seek",
"(",
"0",
")",
"buff",
"=",
"self",
".",
"genotype_file",
".",
"read",
"(",
"3",
")",
"version",
"=",
"0",
"magic",
",",
"data_format",
"=",
"buff",
".",
"unpack",
"(",
"\"HB\"",
",",
"version",
")",
"return",
"magic",
",",
"data_format"
] |
Resets the bed file and preps it for starting at the start of the \
genotype data
Returns to beginning of file and reads the version so that it points \
to first marker's info
:return: (magic, data_format)
|
[
"Resets",
"the",
"bed",
"file",
"and",
"preps",
"it",
"for",
"starting",
"at",
"the",
"start",
"of",
"the",
"\\",
"genotype",
"data"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/bed_parser.py#L194-L208
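For reference, a PLINK .bed header is two magic bytes (0x6c, 0x1b) followed by a mode byte (1 = SNP-major), which is why the corrected struct.unpack("<HB", ...) call above yields (magic, data_format). A synthetic check:

import struct

header = bytes([0x6C, 0x1B, 0x01])  # synthetic SNP-major header
magic, mode = struct.unpack("<HB", header)
assert magic == 0x1B6C and mode == 1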
|
240,560
|
edwards-lab/libGWAS
|
libgwas/bed_parser.py
|
Parser.extract_genotypes
|
def extract_genotypes(self, bytes):
"""Extracts encoded genotype data from binary formatted file.
:param bytes: array of bytes pulled from the .bed file
:return: standard python list containing the genotype data
Only ind_count genotypes will be returned (even if there are
a handful of extra pairs present).
"""
genotypes = []
for b in bytes:
for i in range(0, 4):
v = ((b>>(i*2)) & 3)
genotypes.append(self.geno_conversions[v])
return genotypes[0:self.ind_count]
|
python
|
def extract_genotypes(self, bytes):
"""Extracts encoded genotype data from binary formatted file.
:param bytes: array of bytes pulled from the .bed file
:return: standard python list containing the genotype data
Only ind_count genotypes will be returned (even if there are
a handful of extra pairs present).
"""
genotypes = []
for b in bytes:
for i in range(0, 4):
v = ((b>>(i*2)) & 3)
genotypes.append(self.geno_conversions[v])
return genotypes[0:self.ind_count]
|
[
"def",
"extract_genotypes",
"(",
"self",
",",
"bytes",
")",
":",
"genotypes",
"=",
"[",
"]",
"for",
"b",
"in",
"bytes",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"4",
")",
":",
"v",
"=",
"(",
"(",
"b",
">>",
"(",
"i",
"*",
"2",
")",
")",
"&",
"3",
")",
"genotypes",
".",
"append",
"(",
"self",
".",
"geno_conversions",
"[",
"v",
"]",
")",
"return",
"genotypes",
"[",
"0",
":",
"self",
".",
"ind_count",
"]"
] |
Extracts encoded genotype data from binary formatted file.
:param bytes: array of bytes pulled from the .bed file
:return: standard python list containing the genotype data
Only ind_count genotypes will be returned (even if there are
a handful of extra pairs present).
|
[
"Extracts",
"encoded",
"genotype",
"data",
"from",
"binary",
"formatted",
"file",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/bed_parser.py#L210-L226
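A standalone illustration of the 2-bit unpacking in extract_genotypes(): each byte carries four genotypes, lowest-order bit pair first. The conversion table below is an invented example; the real one lives on the parser instance.

geno_conversions = {0: 0, 1: -1, 2: 1, 3: 2}  # invented mapping for the demo

def decode(byte_values, ind_count):
    out = []
    for b in byte_values:
        for i in range(4):
            out.append(geno_conversions[(b >> (i * 2)) & 3])
    return out[:ind_count]

print(decode(b"\xe4", 4))  # 0xe4 = 0b11100100 -> [0, -1, 1, 2]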
|
240,561
|
edwards-lab/libGWAS
|
libgwas/bed_parser.py
|
Parser.filter_missing
|
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be \
considered
:return: None
This must be run prior to actually parsing the genotypes because it
initializes the following instance members:
* ind_mask
* total_locus_count
* locus_count
* data_parser.boundary (adds loci with too much missingness)
"""
missing = None
locus_count = 0
logging.info("Sorting out missing data from genotype data")
# Filter out individuals according to missingness
self.genotype_file.seek(0)
magic, data_format = struct.unpack("<HB", self.genotype_file.read(3))
if data_format != 1:
Exit(("MVTEST is currently unable to read data formatted as " +
"individual major. You must regenerate your data in SNP major"+
" format. "))
self.bytes_per_read = self.ind_count / 4
if self.ind_count % 4 > 0:
self.bytes_per_read += 1
self.fmt_string = "<" + "B"*self.bytes_per_read
last_chr = -1
for index in range(self.locus_count):
buffer = struct.unpack(self.fmt_string,
self.genotype_file.read(self.bytes_per_read))
chr, pos = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr
genotypes = numpy.array(self.extract_genotypes(buffer),
dtype=numpy.int8)
locus_count += 1
if missing is None:
missing = numpy.zeros(genotypes.shape[0], dtype='int8')
missing += 0+(genotypes==DataParser.missing_storage)
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask = self.ind_mask|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
max_missing = DataParser.snp_miss_tol * valid_individuals
# We can't merge these two iterations since we need to know which
# individuals to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
self.genotype_file.read(3)
self.total_locus_count = self.locus_count
self.locus_count = 0
last_chr = -1
for index in range(self.total_locus_count):
buffer = struct.unpack(self.fmt_string,
self.genotype_file.read(self.bytes_per_read))
genotypes = numpy.ma.MaskedArray(self.extract_genotypes(buffer),
self.ind_mask).compressed()
chr, pos = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr
missing = numpy.sum(0+(genotypes==DataParser.missing_storage))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid)
else:
self.locus_count += 1
|
python
|
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be \
considered
:return: None
This must be run prior to actually parsing the genotypes because it
initializes the following instance members:
* ind_mask
* total_locus_count
* locus_count
* data_parser.boundary (adds loci with too much missingness)
"""
missing = None
locus_count = 0
logging.info("Sorting out missing data from genotype data")
# Filter out individuals according to missingness
self.genotype_file.seek(0)
magic, data_format = struct.unpack("<HB", self.genotype_file.read(3))
if data_format != 1:
Exit(("MVTEST is currently unable to read data formatted as " +
"individual major. You must regenerate your data in SNP major"+
" format. "))
self.bytes_per_read = self.ind_count / 4
if self.ind_count % 4 > 0:
self.bytes_per_read += 1
self.fmt_string = "<" + "B"*self.bytes_per_read
last_chr = -1
for index in range(self.locus_count):
buffer = struct.unpack(self.fmt_string,
self.genotype_file.read(self.bytes_per_read))
chr, pos = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr
genotypes = numpy.array(self.extract_genotypes(buffer),
dtype=numpy.int8)
locus_count += 1
if missing is None:
missing = numpy.zeros(genotypes.shape[0], dtype='int8')
missing += 0+(genotypes==DataParser.missing_storage)
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask = self.ind_mask|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
max_missing = DataParser.snp_miss_tol * valid_individuals
# We can't merge these two iterations since we need to know which
# individuals to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
self.genotype_file.read(3)
self.total_locus_count = self.locus_count
self.locus_count = 0
last_chr = -1
for index in range(self.total_locus_count):
buffer = struct.unpack(self.fmt_string,
self.genotype_file.read(self.bytes_per_read))
genotypes = numpy.ma.MaskedArray(self.extract_genotypes(buffer),
self.ind_mask).compressed()
chr, pos = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr
missing = numpy.sum(0+(genotypes==DataParser.missing_storage))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid)
else:
self.locus_count += 1
|
[
"def",
"filter_missing",
"(",
"self",
")",
":",
"missing",
"=",
"None",
"locus_count",
"=",
"0",
"logging",
".",
"info",
"(",
"\"Sorting out missing data from genotype data\"",
")",
"# Filter out individuals according to missingness",
"self",
".",
"genotype_file",
".",
"seek",
"(",
"0",
")",
"magic",
",",
"data_format",
"=",
"struct",
".",
"unpack",
"(",
"\"<HB\"",
",",
"self",
".",
"genotype_file",
".",
"read",
"(",
"3",
")",
")",
"if",
"data_format",
"!=",
"1",
":",
"Exit",
"(",
"(",
"\"MVTEST is currently unable to read data formatted as \"",
"+",
"\"individual major. You must regenerate your data in SNP major\"",
"+",
"\" format. \"",
")",
")",
"self",
".",
"bytes_per_read",
"=",
"self",
".",
"ind_count",
"/",
"4",
"if",
"self",
".",
"ind_count",
"%",
"4",
">",
"0",
":",
"self",
".",
"bytes_per_read",
"+=",
"1",
"self",
".",
"fmt_string",
"=",
"\"<\"",
"+",
"\"B\"",
"*",
"self",
".",
"bytes_per_read",
"last_chr",
"=",
"-",
"1",
"for",
"index",
"in",
"range",
"(",
"self",
".",
"locus_count",
")",
":",
"buffer",
"=",
"struct",
".",
"unpack",
"(",
"self",
".",
"fmt_string",
",",
"self",
".",
"genotype_file",
".",
"read",
"(",
"self",
".",
"bytes_per_read",
")",
")",
"chr",
",",
"pos",
"=",
"self",
".",
"markers",
"[",
"index",
"]",
"rsid",
"=",
"self",
".",
"rsids",
"[",
"index",
"]",
"if",
"DataParser",
".",
"boundary",
".",
"TestBoundary",
"(",
"chr",
",",
"pos",
",",
"rsid",
")",
":",
"if",
"last_chr",
"!=",
"chr",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"last_chr",
"=",
"chr",
"genotypes",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"extract_genotypes",
"(",
"buffer",
")",
",",
"dtype",
"=",
"numpy",
".",
"int8",
")",
"locus_count",
"+=",
"1",
"if",
"missing",
"is",
"None",
":",
"missing",
"=",
"numpy",
".",
"zeros",
"(",
"genotypes",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"'int8'",
")",
"missing",
"+=",
"0",
"+",
"(",
"genotypes",
"==",
"DataParser",
".",
"missing_storage",
")",
"max_missing",
"=",
"DataParser",
".",
"ind_miss_tol",
"*",
"locus_count",
"dropped_individuals",
"=",
"0",
"+",
"(",
"max_missing",
"<",
"missing",
")",
"self",
".",
"ind_mask",
"=",
"self",
".",
"ind_mask",
"|",
"dropped_individuals",
"valid_individuals",
"=",
"numpy",
".",
"sum",
"(",
"self",
".",
"ind_mask",
"==",
"0",
")",
"max_missing",
"=",
"DataParser",
".",
"snp_miss_tol",
"*",
"valid_individuals",
"# We can't merge these two iterations since we need to know which",
"# individuals to consider for filtering on MAF",
"dropped_snps",
"=",
"[",
"]",
"self",
".",
"genotype_file",
".",
"seek",
"(",
"0",
")",
"self",
".",
"genotype_file",
".",
"read",
"(",
"3",
")",
"self",
".",
"total_locus_count",
"=",
"self",
".",
"locus_count",
"self",
".",
"locus_count",
"=",
"0",
"last_chr",
"=",
"-",
"1",
"for",
"index",
"in",
"range",
"(",
"self",
".",
"total_locus_count",
")",
":",
"buffer",
"=",
"struct",
".",
"unpack",
"(",
"self",
".",
"fmt_string",
",",
"self",
".",
"genotype_file",
".",
"read",
"(",
"self",
".",
"bytes_per_read",
")",
")",
"genotypes",
"=",
"numpy",
".",
"ma",
".",
"MaskedArray",
"(",
"self",
".",
"extract_genotypes",
"(",
"buffer",
")",
",",
"self",
".",
"ind_mask",
")",
".",
"compressed",
"(",
")",
"chr",
",",
"pos",
"=",
"self",
".",
"markers",
"[",
"index",
"]",
"rsid",
"=",
"self",
".",
"rsids",
"[",
"index",
"]",
"if",
"DataParser",
".",
"boundary",
".",
"TestBoundary",
"(",
"chr",
",",
"pos",
",",
"rsid",
")",
":",
"if",
"last_chr",
"!=",
"chr",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"last_chr",
"=",
"chr",
"missing",
"=",
"numpy",
".",
"sum",
"(",
"0",
"+",
"(",
"genotypes",
"==",
"DataParser",
".",
"missing_storage",
")",
")",
"if",
"missing",
">",
"max_missing",
":",
"DataParser",
".",
"boundary",
".",
"dropped_snps",
"[",
"int",
"(",
"chr",
")",
"]",
".",
"add",
"(",
"int",
"(",
"pos",
")",
")",
"dropped_snps",
".",
"append",
"(",
"rsid",
")",
"else",
":",
"self",
".",
"locus_count",
"+=",
"1"
] |
Filter out individuals and SNPs that have too many missing to be \
considered
:return: None
This must be run prior to actually parsing the genotypes because it
initializes the following instance members:
* ind_mask
* total_locus_count
* locus_count
* data_parser.boundary (adds loci with too much missingness)
|
[
"Filter",
"out",
"individuals",
"and",
"SNPs",
"that",
"have",
"too",
"many",
"missing",
"to",
"be",
"\\",
"considered"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/bed_parser.py#L229-L313
|
240,562
|
jalanb/pysyte
|
pysyte/bash/shell.py
|
full_path
|
def full_path(path):
"""Get the real path, expanding links and bashisms"""
return os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
|
python
|
def full_path(path):
"""Get the real path, expanding links and bashisms"""
return os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
|
[
"def",
"full_path",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"path",
")",
")",
")"
] |
Get the real path, expanding links and bashisms
|
[
"Get",
"the",
"real",
"path",
"expanding",
"links",
"and",
"bashisms"
] |
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/bash/shell.py#L69-L71
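Usage sketch for full_path(); the nesting means $VARS expand first, then ~, then realpath resolves symlinks and '..':

import os

os.environ["NOTES"] = "docs"  # illustration only
print(full_path("~/$NOTES/../todo.txt"))  # e.g. /home/user/todo.txt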
|
240,563
|
CitrineInformatics/dftparse
|
dftparse/core.py
|
BlockParser.parse
|
def parse(self, generator):
"""Parse an iterable source of strings into a generator"""
gen = iter(generator)
for line in gen:
block = {}
for rule in self.rules:
if rule[0](line):
block = rule[1](line, gen)
break
yield block
|
python
|
def parse(self, generator):
"""Parse an iterable source of strings into a generator"""
gen = iter(generator)
for line in gen:
block = {}
for rule in self.rules:
if rule[0](line):
block = rule[1](line, gen)
break
yield block
|
[
"def",
"parse",
"(",
"self",
",",
"generator",
")",
":",
"gen",
"=",
"iter",
"(",
"generator",
")",
"for",
"line",
"in",
"gen",
":",
"block",
"=",
"{",
"}",
"for",
"rule",
"in",
"self",
".",
"rules",
":",
"if",
"rule",
"[",
"0",
"]",
"(",
"line",
")",
":",
"block",
"=",
"rule",
"[",
"1",
"]",
"(",
"line",
",",
"gen",
")",
"break",
"yield",
"block"
] |
Parse an iterable source of strings into a generator
|
[
"Parse",
"an",
"iterable",
"source",
"of",
"strings",
"into",
"a",
"generator"
] |
53a1bf19945cf1c195d6af9beccb3d1b7f4a4c1d
|
https://github.com/CitrineInformatics/dftparse/blob/53a1bf19945cf1c195d6af9beccb3d1b7f4a4c1d/dftparse/core.py#L15-L24
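A standalone re-sketch of the parse() loop above with one rule, to show the (predicate, handler) contract and how a handler may consume extra lines from the shared iterator:

def parse(rules, generator):
    gen = iter(generator)
    for line in gen:
        block = {}
        for predicate, handler in rules:
            if predicate(line):
                block = handler(line, gen)
                break
        yield block

rules = [(lambda line: line.startswith("Energy"),
          lambda line, gen: {"energy": float(next(gen))})]
print(list(parse(rules, ["Energy block", "1.25", "noise"])))
# [{'energy': 1.25}, {}]   -- '1.25' was consumed by the handler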
|
240,564
|
edwards-lab/libGWAS
|
libgwas/mach_parser.py
|
Parser.ReportConfiguration
|
def ReportConfiguration(self, file):
"""Report the configuration details for logging purposes.
:param file: Destination for report details
:return: None
"""
global encodingpar
print >> file, libgwas.BuildReportLine("MACH_ARCHIVES", "")
if self.chrpos_encoding:
print >> file, libgwas.BuildReportLine("MACH_CHRPOS",
("IDS expected to be in format chr:pos" +
" SNP boundary filters might not work " +
"(see manual for details)"))
else:
print >> file, libgwas.BuildReportLine("MACH_CHRPOS",
"IDs are treated like RSIDs")
idx = 0
for arch in self.archives[0:]:
print >> file, libgwas.BuildReportLine("", "%s:%s" % (self.archives[idx], self.info_files[idx]))
idx += 1
print >> file, libgwas.BuildReportLine("ENCODING", ["Dosage", "Genotype"][encoding])
|
python
|
def ReportConfiguration(self, file):
"""Report the configuration details for logging purposes.
:param file: Destination for report details
:return: None
"""
global encodingpar
print >> file, libgwas.BuildReportLine("MACH_ARCHIVES", "")
if self.chrpos_encoding:
print >> file, libgwas.BuildReportLine("MACH_CHRPOS",
("IDS expected to be in format chr:pos" +
" SNP boundary filters might not work " +
"(see manual for details)"))
else:
print >> file, libgwas.BuildReportLine("MACH_CHRPOS",
"IDs are treated like RSIDs")
idx = 0
for arch in self.archives[0:]:
print >> file, libgwas.BuildReportLine("", "%s:%s" % (self.archives[idx], self.info_files[idx]))
idx += 1
print >> file, libgwas.BuildReportLine("ENCODING", ["Dosage", "Genotype"][encoding])
|
[
"def",
"ReportConfiguration",
"(",
"self",
",",
"file",
")",
":",
"global",
"encodingpar",
"print",
">>",
"file",
",",
"libgwas",
".",
"BuildReportLine",
"(",
"\"MACH_ARCHIVES\"",
",",
"\"\"",
")",
"if",
"self",
".",
"chrpos_encoding",
":",
"print",
">>",
"file",
",",
"libgwas",
".",
"BuildReportLine",
"(",
"\"MACH_CHRPOS\"",
",",
"(",
"\"IDS expected to be in format chr:pos\"",
"+",
"\" SNP boundary filters might not work \"",
"+",
"\"(see manual for details)\"",
")",
")",
"else",
":",
"print",
">>",
"file",
",",
"libgwas",
".",
"BuildReportLine",
"(",
"\"MACH_CHRPOS\"",
",",
"\"IDs are treated like RSIDs\"",
")",
"idx",
"=",
"0",
"for",
"arch",
"in",
"self",
".",
"archives",
"[",
"0",
":",
"]",
":",
"print",
">>",
"file",
",",
"libgwas",
".",
"BuildReportLine",
"(",
"\"\"",
",",
"\"%s:%s\"",
"%",
"(",
"self",
".",
"archives",
"[",
"idx",
"]",
",",
"self",
".",
"info_files",
"[",
"idx",
"]",
")",
")",
"idx",
"+=",
"1",
"print",
">>",
"file",
",",
"libgwas",
".",
"BuildReportLine",
"(",
"\"ENCODING\"",
",",
"[",
"\"Dosage\"",
",",
"\"Genotype\"",
"]",
"[",
"encoding",
"]",
")"
] |
Report the configuration details for logging purposes.
:param file: Destination for report details
:return: None
|
[
"Report",
"the",
"configuration",
"details",
"for",
"logging",
"purposes",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/mach_parser.py#L147-L167
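The record above is Python 2 (print >> file). The Python 3 equivalent of one report line, with a hypothetical stand-in for libgwas.BuildReportLine:

import sys

def build_report_line(key, value):  # hypothetical stand-in
    return "%-20s: %s" % (key, value)

print(build_report_line("MACH_ARCHIVES", ""), file=sys.stderr)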
|
240,565
|
asyncdef/interfaces
|
asyncdef/interfaces/engine/icoroutine.py
|
ICoroutine.cancel
|
def cancel(
self,
identifier: typing.Any,
exc_type: typing.Optional[type]=None,
) -> bool:
"""Cancel an active coroutine and remove it from the schedule.
Args:
identifier (typing.Any): The identifier returned from add.
exc_type (typing.Optional[type]): The exception type to throw into
the coroutine on cancel. No exception is thrown if nothing is
given. Instead the coroutine is no longer processed.
Returns:
bool: True if the coroutine is cancelled. False if the identifier
is invalid or if the coroutine is complete.
"""
raise NotImplementedError()
|
python
|
def cancel(
self,
identifier: typing.Any,
exc_type: typing.Optional[type]=None,
) -> bool:
"""Cancel an active coroutine and remove it from the schedule.
Args:
identifier (typing.Any): The identifier returned from add.
exc_type (typing.Optional[type]): The exception type to throw into
the coroutine on cancel. No exception is thrown if nothing is
given. Instead the coroutine is no longer processed.
Returns:
bool: True if the coroutine is cancelled. False if the identifier
is invalid or if the coroutine is complete.
"""
raise NotImplementedError()
|
[
"def",
"cancel",
"(",
"self",
",",
"identifier",
":",
"typing",
".",
"Any",
",",
"exc_type",
":",
"typing",
".",
"Optional",
"[",
"type",
"]",
"=",
"None",
",",
")",
"->",
"bool",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
Cancel an active coroutine and remove it from the schedule.
Args:
identifier (typing.Any): The identifier returned from add.
exc_type (typing.Optional[type]): The exception type to throw into
the coroutine on cancel. No exception is thrown if nothing is
given. Instead the coroutine is no longer processed.
Returns:
bool: True if the coroutine is cancelled. False if the identifier
is invalid or if the coroutine is complete.
|
[
"Cancel",
"an",
"active",
"coroutine",
"and",
"remove",
"it",
"from",
"the",
"schedule",
"."
] |
17c589c6ab158e3d9977a6d9da6d5ecd44844285
|
https://github.com/asyncdef/interfaces/blob/17c589c6ab158e3d9977a6d9da6d5ecd44844285/asyncdef/interfaces/engine/icoroutine.py#L29-L46
|
240,566
|
mayfield/shellish
|
shellish/command/supplement.py
|
ShellishHelpFormatter._fill_text
|
def _fill_text(self, text, width=None, indent=None):
""" Reflow text width while maintaining certain formatting
characteristics like double newlines and indented statements. """
assert isinstance(text, str)
if indent is None:
indent = NBSP * self._current_indent
assert isinstance(indent, str)
paragraphs = []
line_buf = []
pre = ''
for fragment in text.splitlines():
pre_indent = self.leadingws.match(fragment)
if not fragment or pre_indent:
if line_buf:
line = ' '.join(line_buf)
paragraphs.append((pre, self.whitespace.sub(' ', line)))
if not fragment:
paragraphs.append(('', ''))
else:
pre = pre_indent.group()
fragment = self.leadingws.sub('', fragment)
paragraphs.append((pre, fragment))
line_buf = []
pre = ''
else:
line_buf.append(fragment)
if line_buf:
line = ' '.join(line_buf)
paragraphs.append((pre, self.whitespace.sub(' ', line)))
indent = VTMLBuffer(indent)
nl = VTMLBuffer('\n')
if width is None:
width = self._width - len(indent)
lines = []
for pre, paragraph in paragraphs:
pwidth = width - len(pre)
lines.append(nl.join((indent + pre + x)
for x in vtmlrender(paragraph).wrap(pwidth)))
return nl.join(lines)
|
python
|
def _fill_text(self, text, width=None, indent=None):
""" Reflow text width while maintaining certain formatting
characteristics like double newlines and indented statements. """
assert isinstance(text, str)
if indent is None:
indent = NBSP * self._current_indent
assert isinstance(indent, str)
paragraphs = []
line_buf = []
pre = ''
for fragment in text.splitlines():
pre_indent = self.leadingws.match(fragment)
if not fragment or pre_indent:
if line_buf:
line = ' '.join(line_buf)
paragraphs.append((pre, self.whitespace.sub(' ', line)))
if not fragment:
paragraphs.append(('', ''))
else:
pre = pre_indent.group()
fragment = self.leadingws.sub('', fragment)
paragraphs.append((pre, fragment))
line_buf = []
pre = ''
else:
line_buf.append(fragment)
if line_buf:
line = ' '.join(line_buf)
paragraphs.append((pre, self.whitespace.sub(' ', line)))
indent = VTMLBuffer(indent)
nl = VTMLBuffer('\n')
if width is None:
width = self._width - len(indent)
lines = []
for pre, paragraph in paragraphs:
pwidth = width - len(pre)
lines.append(nl.join((indent + pre + x)
for x in vtmlrender(paragraph).wrap(pwidth)))
return nl.join(lines)
|
[
"def",
"_fill_text",
"(",
"self",
",",
"text",
",",
"width",
"=",
"None",
",",
"indent",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"text",
",",
"str",
")",
"if",
"indent",
"is",
"None",
":",
"indent",
"=",
"NBSP",
"*",
"self",
".",
"_current_indent",
"assert",
"isinstance",
"(",
"indent",
",",
"str",
")",
"paragraphs",
"=",
"[",
"]",
"line_buf",
"=",
"[",
"]",
"pre",
"=",
"''",
"for",
"fragment",
"in",
"text",
".",
"splitlines",
"(",
")",
":",
"pre_indent",
"=",
"self",
".",
"leadingws",
".",
"match",
"(",
"fragment",
")",
"if",
"not",
"fragment",
"or",
"pre_indent",
":",
"if",
"line_buf",
":",
"line",
"=",
"' '",
".",
"join",
"(",
"line_buf",
")",
"paragraphs",
".",
"append",
"(",
"(",
"pre",
",",
"self",
".",
"whitespace",
".",
"sub",
"(",
"' '",
",",
"line",
")",
")",
")",
"if",
"not",
"fragment",
":",
"paragraphs",
".",
"append",
"(",
"(",
"''",
",",
"''",
")",
")",
"else",
":",
"pre",
"=",
"pre_indent",
".",
"group",
"(",
")",
"fragment",
"=",
"self",
".",
"leadingws",
".",
"sub",
"(",
"''",
",",
"fragment",
")",
"paragraphs",
".",
"append",
"(",
"(",
"pre",
",",
"fragment",
")",
")",
"line_buf",
"=",
"[",
"]",
"pre",
"=",
"''",
"else",
":",
"line_buf",
".",
"append",
"(",
"fragment",
")",
"if",
"line_buf",
":",
"line",
"=",
"' '",
".",
"join",
"(",
"line_buf",
")",
"paragraphs",
".",
"append",
"(",
"(",
"pre",
",",
"self",
".",
"whitespace",
".",
"sub",
"(",
"' '",
",",
"line",
")",
")",
")",
"indent",
"=",
"VTMLBuffer",
"(",
"indent",
")",
"nl",
"=",
"VTMLBuffer",
"(",
"'\\n'",
")",
"if",
"width",
"is",
"None",
":",
"width",
"=",
"self",
".",
"_width",
"-",
"len",
"(",
"indent",
")",
"lines",
"=",
"[",
"]",
"for",
"pre",
",",
"paragraph",
"in",
"paragraphs",
":",
"pwidth",
"=",
"width",
"-",
"len",
"(",
"pre",
")",
"lines",
".",
"append",
"(",
"nl",
".",
"join",
"(",
"(",
"indent",
"+",
"pre",
"+",
"x",
")",
"for",
"x",
"in",
"vtmlrender",
"(",
"paragraph",
")",
".",
"wrap",
"(",
"pwidth",
")",
")",
")",
"return",
"nl",
".",
"join",
"(",
"lines",
")"
] |
Reflow text width while maintaining certain formatting
characteristics like double newlines and indented statements.
|
[
"Reflow",
"text",
"width",
"while",
"maintaining",
"certain",
"formatting",
"characteristics",
"like",
"double",
"newlines",
"and",
"indented",
"statements",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/supplement.py#L61-L99
|
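A minimal sketch of the paragraph-splitting idea used by `_fill_text`, with plain strings and `textwrap` standing in for shellish's VTMLBuffer machinery; the `reflow` helper is hypothetical, and unlike the original it does not re-apply an indent prefix to wrapped lines:

import re
import textwrap

leadingws = re.compile(r'^\s+')
whitespace = re.compile(r'\s+')

def reflow(text, width=40):
    # Blank lines and indented lines end the current paragraph, just as
    # in _fill_text; consecutive plain lines are joined and re-wrapped.
    paragraphs, buf = [], []
    for fragment in text.splitlines():
        if not fragment or leadingws.match(fragment):
            if buf:
                paragraphs.append(whitespace.sub(' ', ' '.join(buf)))
                buf = []
            paragraphs.append(fragment)
        else:
            buf.append(fragment)
    if buf:
        paragraphs.append(whitespace.sub(' ', ' '.join(buf)))
    return '\n'.join(textwrap.fill(p, width) if p.strip() else p
                     for p in paragraphs)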
240,567
|
mayfield/shellish
|
shellish/command/supplement.py
|
ShellishParser.bind_env
|
def bind_env(self, action, env):
""" Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via `parse_args()` """
if env in self._env_actions:
raise ValueError('Duplicate ENV variable: %s' % env)
self._env_actions[env] = action
action.env = env
|
python
|
def bind_env(self, action, env):
""" Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via `parse_args()` """
if env in self._env_actions:
raise ValueError('Duplicate ENV variable: %s' % env)
self._env_actions[env] = action
action.env = env
|
[
"def",
"bind_env",
"(",
"self",
",",
"action",
",",
"env",
")",
":",
"if",
"env",
"in",
"self",
".",
"_env_actions",
":",
"raise",
"ValueError",
"(",
"'Duplicate ENV variable: %s'",
"%",
"env",
")",
"self",
".",
"_env_actions",
"[",
"env",
"]",
"=",
"action",
"action",
".",
"env",
"=",
"env"
] |
Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via `parse_args()`
|
[
"Bind",
"an",
"environment",
"variable",
"to",
"an",
"argument",
"action",
".",
"The",
"env",
"value",
"will",
"traditionally",
"be",
"something",
"uppercase",
"like",
"MYAPP_FOO_ARG",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/supplement.py#L278-L287
|
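A hedged sketch of how such a binding might be consumed before parsing; `apply_env_defaults` is a hypothetical helper, not part of shellish:

import os

def apply_env_defaults(parser, env_actions, environ=os.environ):
    # For every bound ENV var that is set, pre-load its value as the
    # argument's default; explicit command-line values still win,
    # matching the set_defaults() behaviour the docstring describes.
    for env, action in env_actions.items():
        if env in environ:
            parser.set_defaults(**{action.dest: environ[env]})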
240,568
|
mayfield/shellish
|
shellish/command/supplement.py
|
ShellishParser.print_help
|
def print_help(self, *args, **kwargs):
""" Add pager support to help output. """
if self._command is not None and self._command.session.allow_pager:
desc = 'Help\: %s' % '-'.join(self.prog.split())
pager_kwargs = self._command.get_pager_spec()
with paging.pager_redirect(desc, **pager_kwargs):
return super().print_help(*args, **kwargs)
else:
return super().print_help(*args, **kwargs)
|
python
|
def print_help(self, *args, **kwargs):
""" Add pager support to help output. """
if self._command is not None and self._command.session.allow_pager:
desc = 'Help\: %s' % '-'.join(self.prog.split())
pager_kwargs = self._command.get_pager_spec()
with paging.pager_redirect(desc, **pager_kwargs):
return super().print_help(*args, **kwargs)
else:
return super().print_help(*args, **kwargs)
|
[
"def",
"print_help",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_command",
"is",
"not",
"None",
"and",
"self",
".",
"_command",
".",
"session",
".",
"allow_pager",
":",
"desc",
"=",
"'Help\\: %s'",
"%",
"'-'",
".",
"join",
"(",
"self",
".",
"prog",
".",
"split",
"(",
")",
")",
"pager_kwargs",
"=",
"self",
".",
"_command",
".",
"get_pager_spec",
"(",
")",
"with",
"paging",
".",
"pager_redirect",
"(",
"desc",
",",
"*",
"*",
"pager_kwargs",
")",
":",
"return",
"super",
"(",
")",
".",
"print_help",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"super",
"(",
")",
".",
"print_help",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Add pager support to help output.
|
[
"Add",
"pager",
"support",
"to",
"help",
"output",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/supplement.py#L329-L337
|
240,569
|
mayfield/shellish
|
shellish/command/supplement.py
|
ShellishParser.add_subparsers
|
def add_subparsers(self, prog=None, **kwargs):
""" Supplement a proper `prog` keyword argument for the subprocessor.
The superclass technique for getting the `prog` value breaks because
of our VT100 escape codes injected by `format_help`. """
if prog is None:
# Use a non-shellish help formatter to avoid vt100 codes.
f = argparse.HelpFormatter(prog=self.prog)
f.add_usage(self.usage, self._get_positional_actions(),
self._mutually_exclusive_groups, '')
prog = f.format_help().strip()
return super().add_subparsers(prog=prog, **kwargs)
|
python
|
def add_subparsers(self, prog=None, **kwargs):
""" Supplement a proper `prog` keyword argument for the subprocessor.
The superclass technique for getting the `prog` value breaks because
of our VT100 escape codes injected by `format_help`. """
if prog is None:
# Use a non-shellish help formatter to avoid vt100 codes.
f = argparse.HelpFormatter(prog=self.prog)
f.add_usage(self.usage, self._get_positional_actions(),
self._mutually_exclusive_groups, '')
prog = f.format_help().strip()
return super().add_subparsers(prog=prog, **kwargs)
|
[
"def",
"add_subparsers",
"(",
"self",
",",
"prog",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"prog",
"is",
"None",
":",
"# Use a non-shellish help formatter to avoid vt100 codes.",
"f",
"=",
"argparse",
".",
"HelpFormatter",
"(",
"prog",
"=",
"self",
".",
"prog",
")",
"f",
".",
"add_usage",
"(",
"self",
".",
"usage",
",",
"self",
".",
"_get_positional_actions",
"(",
")",
",",
"self",
".",
"_mutually_exclusive_groups",
",",
"''",
")",
"prog",
"=",
"f",
".",
"format_help",
"(",
")",
".",
"strip",
"(",
")",
"return",
"super",
"(",
")",
".",
"add_subparsers",
"(",
"prog",
"=",
"prog",
",",
"*",
"*",
"kwargs",
")"
] |
Supplement a proper `prog` keyword argument for the subprocessor.
The superclass technique for getting the `prog` value breaks because
of our VT100 escape codes injected by `format_help`.
|
[
"Supplement",
"a",
"proper",
"prog",
"keyword",
"argument",
"for",
"the",
"subprocessor",
".",
"The",
"superclass",
"technique",
"for",
"getting",
"the",
"prog",
"value",
"breaks",
"because",
"of",
"our",
"VT100",
"escape",
"codes",
"injected",
"by",
"format_help",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/supplement.py#L351-L361
|
240,570
|
ThreshingFloor/libtf
|
libtf/logparsers/tf_generic_log.py
|
TFGenericLog._analyze
|
def _analyze(self):
"""
Apply the filter to the log file
"""
for parsed_line in self.parsed_lines:
if 'ip' in parsed_line:
if parsed_line['ip'] in self.filter['ips']:
self.noisy_logs.append(parsed_line)
else:
self.quiet_logs.append(parsed_line)
else:
self.quiet_logs.append(parsed_line)
|
python
|
def _analyze(self):
"""
Apply the filter to the log file
"""
for parsed_line in self.parsed_lines:
if 'ip' in parsed_line:
if parsed_line['ip'] in self.filter['ips']:
self.noisy_logs.append(parsed_line)
else:
self.quiet_logs.append(parsed_line)
else:
self.quiet_logs.append(parsed_line)
|
[
"def",
"_analyze",
"(",
"self",
")",
":",
"for",
"parsed_line",
"in",
"self",
".",
"parsed_lines",
":",
"if",
"'ip'",
"in",
"parsed_line",
":",
"if",
"parsed_line",
"[",
"'ip'",
"]",
"in",
"self",
".",
"filter",
"[",
"'ips'",
"]",
":",
"self",
".",
"noisy_logs",
".",
"append",
"(",
"parsed_line",
")",
"else",
":",
"self",
".",
"quiet_logs",
".",
"append",
"(",
"parsed_line",
")",
"else",
":",
"self",
".",
"quiet_logs",
".",
"append",
"(",
"parsed_line",
")"
] |
Apply the filter to the log file
|
[
"Apply",
"the",
"filter",
"to",
"the",
"log",
"file"
] |
f1a8710f750639c9b9e2a468ece0d2923bf8c3df
|
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_generic_log.py#L16-L27
|
240,571
|
ThreshingFloor/libtf
|
libtf/logparsers/tf_generic_log.py
|
TFGenericLog._extract_features
|
def _extract_features(self):
"""
Get the feature data from the log file necessary for a reduction
"""
for parsed_line in self.parsed_lines:
result = {'raw': parsed_line}
if 'ip' in parsed_line:
result['ip'] = parsed_line['ip']
if result['ip'] not in self.features['ips']:
self.features['ips'].append(result['ip'])
|
python
|
def _extract_features(self):
"""
Get the feature data from the log file necessary for a reduction
"""
for parsed_line in self.parsed_lines:
result = {'raw': parsed_line}
if 'ip' in parsed_line:
result['ip'] = parsed_line['ip']
if result['ip'] not in self.features['ips']:
self.features['ips'].append(result['ip'])
|
[
"def",
"_extract_features",
"(",
"self",
")",
":",
"for",
"parsed_line",
"in",
"self",
".",
"parsed_lines",
":",
"result",
"=",
"{",
"'raw'",
":",
"parsed_line",
"}",
"if",
"'ip'",
"in",
"parsed_line",
":",
"result",
"[",
"'ip'",
"]",
"=",
"parsed_line",
"[",
"'ip'",
"]",
"if",
"result",
"[",
"'ip'",
"]",
"not",
"in",
"self",
".",
"features",
"[",
"'ips'",
"]",
":",
"self",
".",
"features",
"[",
"'ips'",
"]",
".",
"append",
"(",
"result",
"[",
"'ip'",
"]",
")"
] |
Get the feature data from the log file necessary for a reduction
|
[
"Get",
"the",
"feature",
"data",
"from",
"the",
"log",
"file",
"necessary",
"for",
"a",
"reduction"
] |
f1a8710f750639c9b9e2a468ece0d2923bf8c3df
|
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_generic_log.py#L45-L55
|
240,572
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._get_local_ip
|
def _get_local_ip():
"""
Get the local IP of this device
:return: IP of this computer
:rtype: str
"""
return set([x[4][0] for x in socket.getaddrinfo(
socket.gethostname(),
80,
socket.AF_INET
)]).pop()
|
python
|
def _get_local_ip():
"""
Get the local IP of this device
:return: IP of this computer
:rtype: str
"""
return set([x[4][0] for x in socket.getaddrinfo(
socket.gethostname(),
80,
socket.AF_INET
)]).pop()
|
[
"def",
"_get_local_ip",
"(",
")",
":",
"return",
"set",
"(",
"[",
"x",
"[",
"4",
"]",
"[",
"0",
"]",
"for",
"x",
"in",
"socket",
".",
"getaddrinfo",
"(",
"socket",
".",
"gethostname",
"(",
")",
",",
"80",
",",
"socket",
".",
"AF_INET",
")",
"]",
")",
".",
"pop",
"(",
")"
] |
Get the local IP of this device
:return: IP of this computer
:rtype: str
|
[
"Get",
"the",
"local",
"ip",
"of",
"this",
"device"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L96-L107
|
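For reference, `getaddrinfo` yields `(family, type, proto, canonname, sockaddr)` tuples, and for AF_INET the sockaddr is `(ip, port)`, which is why the code indexes `x[4][0]`; a quick illustration (the printed address is made up):

import socket

for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
        socket.gethostname(), 80, socket.AF_INET):
    print(sockaddr[0])  # e.g. '192.168.1.23'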
240,573
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._init_listen_socket
|
def _init_listen_socket(self):
"""
Init listen socket
:rtype: None
"""
self.debug("()")
self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._listen_socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1
)
self._listen_socket.bind((self._listen_ip, self._listen_port))
self._listening.append(self._listen_socket)
|
python
|
def _init_listen_socket(self):
"""
Init listen socket
:rtype: None
"""
self.debug("()")
self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._listen_socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1
)
self._listen_socket.bind((self._listen_ip, self._listen_port))
self._listening.append(self._listen_socket)
|
[
"def",
"_init_listen_socket",
"(",
"self",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"self",
".",
"_listen_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"self",
".",
"_listen_socket",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"self",
".",
"_listen_socket",
".",
"bind",
"(",
"(",
"self",
".",
"_listen_ip",
",",
"self",
".",
"_listen_port",
")",
")",
"self",
".",
"_listening",
".",
"append",
"(",
"self",
".",
"_listen_socket",
")"
] |
Init listen socket
:rtype: None
|
[
"Init",
"listen",
"socket"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L109-L123
|
240,574
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._shutdown_listen_socket
|
def _shutdown_listen_socket(self):
"""
Shutdown listening socket
:rtype: None
"""
self.debug("()")
if self._listen_socket in self._listening:
self._listening.remove(self._listen_socket)
if self._listen_socket:
self._listen_socket.close()
self._listen_socket = None
|
python
|
def _shutdown_listen_socket(self):
"""
Shutdown listening socket
:rtype: None
"""
self.debug("()")
if self._listen_socket in self._listening:
self._listening.remove(self._listen_socket)
if self._listen_socket:
self._listen_socket.close()
self._listen_socket = None
|
[
"def",
"_shutdown_listen_socket",
"(",
"self",
")",
":",
"self",
".",
"debug",
"(",
"\"()\"",
")",
"if",
"self",
".",
"_listen_socket",
"in",
"self",
".",
"_listening",
":",
"self",
".",
"_listening",
".",
"remove",
"(",
"self",
".",
"_listen_socket",
")",
"if",
"self",
".",
"_listen_socket",
":",
"self",
".",
"_listen_socket",
".",
"close",
"(",
")",
"self",
".",
"_listen_socket",
"=",
"None"
] |
Shutdown listening socket
:rtype: None
|
[
"Shutdown",
"listening",
"socket"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L125-L136
|
240,575
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._send
|
def _send(self, ip, port, data):
"""
Send a UDP message
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:return: Number of bytes sent
:rtype: int
"""
return self._listen_socket.sendto(data, (ip, port))
|
python
|
def _send(self, ip, port, data):
"""
Send a UDP message
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:return: Number of bytes sent
:rtype: int
"""
return self._listen_socket.sendto(data, (ip, port))
|
[
"def",
"_send",
"(",
"self",
",",
"ip",
",",
"port",
",",
"data",
")",
":",
"return",
"self",
".",
"_listen_socket",
".",
"sendto",
"(",
"data",
",",
"(",
"ip",
",",
"port",
")",
")"
] |
Send a UDP message
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:return: Number of bytes sent
:rtype: int
|
[
"Send",
"an",
"UDP",
"message"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L138-L149
|
240,576
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._send_ack
|
def _send_ack(self, ip, port, packet, update_timestamp=True):
"""
Send an ack packet
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:param packet: Packet to be acknowledged
:type packet: APPMessage
:param update_timestamp: Whether to update the timestamp to the current time
:type update_timestamp: bool
:rtype: None
"""
# TODO: maybe wait a bit, so the ack could get attached to another
# packet
ack = APPMessage(message_type=MsgType.ACK)
ack.header.ack_sequence_number = packet.header.sequence_number
self._send_packet(
ip, port, ack,
update_timestamp=update_timestamp, acknowledge_packet=False
)
|
python
|
def _send_ack(self, ip, port, packet, update_timestamp=True):
"""
Send an ack packet
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:param packet: Packet to be acknowledged
:type packet: APPMessage
:param update_timestamp: Whether to update the timestamp to the current time
:type update_timestamp: bool
:rtype: None
"""
# TODO: maybe wait a bit, so the ack could get attached to another
# packet
ack = APPMessage(message_type=MsgType.ACK)
ack.header.ack_sequence_number = packet.header.sequence_number
self._send_packet(
ip, port, ack,
update_timestamp=update_timestamp, acknowledge_packet=False
)
|
[
"def",
"_send_ack",
"(",
"self",
",",
"ip",
",",
"port",
",",
"packet",
",",
"update_timestamp",
"=",
"True",
")",
":",
"# TODO: maybe wait a bit, so the ack could get attached to another",
"# packet",
"ack",
"=",
"APPMessage",
"(",
"message_type",
"=",
"MsgType",
".",
"ACK",
")",
"ack",
".",
"header",
".",
"ack_sequence_number",
"=",
"packet",
".",
"header",
".",
"sequence_number",
"self",
".",
"_send_packet",
"(",
"ip",
",",
"port",
",",
"ack",
",",
"update_timestamp",
"=",
"update_timestamp",
",",
"acknowledge_packet",
"=",
"False",
")"
] |
Send an ack packet
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:param packet: Packet to be acknowledged
:type packet: APPMessage
:param update_timestamp: Whether to update the timestamp to the current time
:type update_timestamp: bool
:rtype: None
|
[
"Send",
"an",
"ack",
"packet"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L189-L210
|
240,577
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._get_packet
|
def _get_packet(self, socket):
"""
Read packet and put it into inbox
:param socket: Socket to read from
:type socket: socket.socket
:return: Read packet
:rtype: APPMessage
"""
data, (ip, port) = socket.recvfrom(self._buffer_size)
packet, remainder = self._unpack(data)
self.inbox.put((ip, port, packet))
self.new_packet.set()
self.debug(u"RX: {}".format(packet))
if packet.header.sequence_number is not None:
# Packet needs to be acknowledged
self._send_ack(ip, port, packet)
ack_seq = packet.header.ack_sequence_number
if ack_seq is not None:
# Packet got acknowledged
with self._seq_ack_lock:
if ack_seq in self._seq_ack:
self.debug(u"Seq {} got acked".format(ack_seq))
self._seq_ack.remove(ack_seq)
return packet
|
python
|
def _get_packet(self, socket):
"""
Read packet and put it into inbox
:param socket: Socket to read from
:type socket: socket.socket
:return: Read packet
:rtype: APPMessage
"""
data, (ip, port) = socket.recvfrom(self._buffer_size)
packet, remainder = self._unpack(data)
self.inbox.put((ip, port, packet))
self.new_packet.set()
self.debug(u"RX: {}".format(packet))
if packet.header.sequence_number is not None:
# Packet needs to be acknowledged
self._send_ack(ip, port, packet)
ack_seq = packet.header.ack_sequence_number
if ack_seq is not None:
# Packet got acknowledged
with self._seq_ack_lock:
if ack_seq in self._seq_ack:
self.debug(u"Seq {} got acked".format(ack_seq))
self._seq_ack.remove(ack_seq)
return packet
|
[
"def",
"_get_packet",
"(",
"self",
",",
"socket",
")",
":",
"data",
",",
"(",
"ip",
",",
"port",
")",
"=",
"socket",
".",
"recvfrom",
"(",
"self",
".",
"_buffer_size",
")",
"packet",
",",
"remainder",
"=",
"self",
".",
"_unpack",
"(",
"data",
")",
"self",
".",
"inbox",
".",
"put",
"(",
"(",
"ip",
",",
"port",
",",
"packet",
")",
")",
"self",
".",
"new_packet",
".",
"set",
"(",
")",
"self",
".",
"debug",
"(",
"u\"RX: {}\"",
".",
"format",
"(",
"packet",
")",
")",
"if",
"packet",
".",
"header",
".",
"sequence_number",
"is",
"not",
"None",
":",
"# Packet needs to be acknowledged",
"self",
".",
"_send_ack",
"(",
"ip",
",",
"port",
",",
"packet",
")",
"ack_seq",
"=",
"packet",
".",
"header",
".",
"ack_sequence_number",
"if",
"ack_seq",
"is",
"not",
"None",
":",
"# Packet got acknowledged",
"with",
"self",
".",
"_seq_ack_lock",
":",
"if",
"ack_seq",
"in",
"self",
".",
"_seq_ack",
":",
"self",
".",
"debug",
"(",
"u\"Seq {} got acked\"",
".",
"format",
"(",
"ack_seq",
")",
")",
"self",
".",
"_seq_ack",
".",
"remove",
"(",
"ack_seq",
")",
"return",
"packet"
] |
Read packet and put it into inbox
:param socket: Socket to read from
:type socket: socket.socket
:return: Read packet
:rtype: APPMessage
|
[
"Read",
"packet",
"and",
"put",
"it",
"into",
"inbox"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L232-L257
|
240,578
|
the01/python-paps
|
paps/si/app/sensor.py
|
Sensor._acking
|
def _acking(self, params=None):
"""
Packet acknowledge and retry loop
:param params: Ignore
:type params: None
:rtype: None
"""
while self._is_running:
try:
t, num_try, (ip, port), packet = self._to_ack.get(
timeout=self._select_timeout
)
except queue.Empty:
# Timed out
continue
diff = t - time.time()
if diff > 0:
time.sleep(diff)
with self._seq_ack_lock:
if packet.header.sequence_number not in self._seq_ack:
# Not waiting for this?
continue
if num_try <= self._retransmit_max_tries:
# Try again
self._send(ip, port, packet.pack(True))
self._to_ack.put(
(
time.time() + self._retransmit_timeout,
num_try + 1,
(ip, port),
packet
)
)
else:
# Failed to ack
with self._seq_ack_lock:
try:
self._seq_ack.remove(packet.header.sequence_number)
except KeyError:
pass
self.warning("Exceeded max tries")
|
python
|
def _acking(self, params=None):
"""
Packet acknowledge and retry loop
:param params: Ignore
:type params: None
:rtype: None
"""
while self._is_running:
try:
t, num_try, (ip, port), packet = self._to_ack.get(
timeout=self._select_timeout
)
except queue.Empty:
# Timed out
continue
diff = t - time.time()
if diff > 0:
time.sleep(diff)
with self._seq_ack_lock:
if packet.header.sequence_number not in self._seq_ack:
# Not waiting for this?
continue
if num_try <= self._retransmit_max_tries:
# Try again
self._send(ip, port, packet.pack(True))
self._to_ack.put(
(
time.time() + self._retransmit_timeout,
num_try + 1,
(ip, port),
packet
)
)
else:
# Failed to ack
with self._seq_ack_lock:
try:
self._seq_ack.remove(packet.header.sequence_number)
except KeyError:
pass
self.warning("Exceeded max tries")
|
[
"def",
"_acking",
"(",
"self",
",",
"params",
"=",
"None",
")",
":",
"while",
"self",
".",
"_is_running",
":",
"try",
":",
"t",
",",
"num_try",
",",
"(",
"ip",
",",
"port",
")",
",",
"packet",
"=",
"self",
".",
"_to_ack",
".",
"get",
"(",
"timeout",
"=",
"self",
".",
"_select_timeout",
")",
"except",
"queue",
".",
"Empty",
":",
"# Timed out",
"continue",
"diff",
"=",
"t",
"-",
"time",
".",
"time",
"(",
")",
"if",
"diff",
">",
"0",
":",
"time",
".",
"sleep",
"(",
"diff",
")",
"with",
"self",
".",
"_seq_ack_lock",
":",
"if",
"packet",
".",
"header",
".",
"sequence_number",
"not",
"in",
"self",
".",
"_seq_ack",
":",
"# Not waiting for this?",
"continue",
"if",
"num_try",
"<=",
"self",
".",
"_retransmit_max_tries",
":",
"# Try again",
"self",
".",
"_send",
"(",
"ip",
",",
"port",
",",
"packet",
".",
"pack",
"(",
"True",
")",
")",
"self",
".",
"_to_ack",
".",
"put",
"(",
"(",
"time",
".",
"time",
"(",
")",
"+",
"self",
".",
"_retransmit_timeout",
",",
"num_try",
"+",
"1",
",",
"(",
"ip",
",",
"port",
")",
",",
"packet",
")",
")",
"else",
":",
"# Failed to ack",
"with",
"self",
".",
"_seq_ack_lock",
":",
"try",
":",
"self",
".",
"_seq_ack",
".",
"remove",
"(",
"packet",
".",
"header",
".",
"sequence_number",
")",
"except",
"KeyError",
":",
"pass",
"self",
".",
"warning",
"(",
"\"Exceeded max tries\"",
")"
] |
Packet acknowledge and retry loop
:param params: Ignore
:type params: None
:rtype: None
|
[
"Packet",
"acknowledge",
"and",
"retry",
"loop"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensor.py#L293-L337
|
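The loop above schedules retransmits through a queue of `(due_time, tries, addr, packet)` entries. A stripped-down, self-contained version of that idea, with a hypothetical `send` callback and the lock omitted for brevity:

import queue
import time

def retransmit_loop(to_ack, pending_seqs, send, max_tries=3, timeout=0.5):
    # Sleep until an entry is due, resend if it is still unacked, and
    # requeue it with an incremented try count until max_tries is hit.
    while True:
        try:
            due, tries, addr, packet = to_ack.get(timeout=timeout)
        except queue.Empty:
            return
        time.sleep(max(0, due - time.time()))
        if packet.header.sequence_number not in pending_seqs:
            continue  # already acknowledged
        if tries <= max_tries:
            send(addr, packet)
            to_ack.put((time.time() + timeout, tries + 1, addr, packet))
        else:
            pending_seqs.discard(packet.header.sequence_number)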
240,579
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
_get_request_args
|
def _get_request_args(method, **kwargs):
"""Use `method` and other settings to produce a flickr API arguments.
Here also use json as the return type.
:param method: The method provided by flickr,
ex: flickr.photosets.getPhotos
:type method: str
:param kwargs: Other settings
:type kwargs: dict
:return: An argument list used for post request
:rtype: list of sets
"""
args = [
('api_key', api_key),
('format', 'json'),
('method', method),
('nojsoncallback', '1'),
]
if kwargs:
for key, value in kwargs.iteritems():
args.append((key, value))
args.sort(key=lambda tup: tup[0])
api_sig = _get_api_sig(args)
args.append(api_sig)
return args
|
python
|
def _get_request_args(method, **kwargs):
"""Use `method` and other settings to produce a flickr API arguments.
Here also use json as the return type.
:param method: The method provided by flickr,
ex: flickr.photosets.getPhotos
:type method: str
:param kwargs: Other settings
:type kwargs: dict
:return: An argument list used for post request
:rtype: list of sets
"""
args = [
('api_key', api_key),
('format', 'json'),
('method', method),
('nojsoncallback', '1'),
]
if kwargs:
for key, value in kwargs.iteritems():
args.append((key, value))
args.sort(key=lambda tup: tup[0])
api_sig = _get_api_sig(args)
args.append(api_sig)
return args
|
[
"def",
"_get_request_args",
"(",
"method",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"[",
"(",
"'api_key'",
",",
"api_key",
")",
",",
"(",
"'format'",
",",
"'json'",
")",
",",
"(",
"'method'",
",",
"method",
")",
",",
"(",
"'nojsoncallback'",
",",
"'1'",
")",
",",
"]",
"if",
"kwargs",
":",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"args",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")",
"args",
".",
"sort",
"(",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"0",
"]",
")",
"api_sig",
"=",
"_get_api_sig",
"(",
"args",
")",
"args",
".",
"append",
"(",
"api_sig",
")",
"return",
"args"
] |
Use `method` and other settings to produce the flickr API arguments.
Here also use json as the return type.
:param method: The method provided by flickr,
ex: flickr.photosets.getPhotos
:type method: str
:param kwargs: Other settings
:type kwargs: dict
:return: An argument list used for post request
:rtype: list of sets
|
[
"Use",
"method",
"and",
"other",
"settings",
"to",
"produce",
"a",
"flickr",
"API",
"arguments",
".",
"Here",
"also",
"use",
"json",
"as",
"the",
"return",
"type",
"."
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L72-L96
|
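For a concrete picture, a call like the one `get_photos_info` makes below would produce an argument list of roughly this shape (key and signature values are placeholders):

args = _get_request_args('flickr.photosets.getPhotos',
                         photoset_id='72157600012345678')
# -> [('api_key', '<API_KEY>'),
#     ('format', 'json'),
#     ('method', 'flickr.photosets.getPhotos'),
#     ('nojsoncallback', '1'),
#     ('photoset_id', '72157600012345678'),
#     ('api_sig', '<md5 hex digest>')]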
240,580
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
_get_api_sig
|
def _get_api_sig(args):
"""Flickr API need a hash string which made using post arguments
:param args: Arguments of the flickr request
:type args: list of sets
:return: api_sig, ex: ('api_sig', 'abcdefg')
:rtype: tuple
"""
tmp_sig = api_secret
for i in args:
tmp_sig = tmp_sig + i[0] + i[1]
api_sig = hashlib.md5(tmp_sig.encode('utf-8')).hexdigest()
return 'api_sig', api_sig
|
python
|
def _get_api_sig(args):
"""Flickr API need a hash string which made using post arguments
:param args: Arguments of the flickr request
:type args: list of sets
:return: api_sig, ex: ('api_sig', 'abcdefg')
:rtype: tuple
"""
tmp_sig = api_secret
for i in args:
tmp_sig = tmp_sig + i[0] + i[1]
api_sig = hashlib.md5(tmp_sig.encode('utf-8')).hexdigest()
return 'api_sig', api_sig
|
[
"def",
"_get_api_sig",
"(",
"args",
")",
":",
"tmp_sig",
"=",
"api_secret",
"for",
"i",
"in",
"args",
":",
"tmp_sig",
"=",
"tmp_sig",
"+",
"i",
"[",
"0",
"]",
"+",
"i",
"[",
"1",
"]",
"api_sig",
"=",
"hashlib",
".",
"md5",
"(",
"tmp_sig",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"'api_sig'",
",",
"api_sig"
] |
Flickr API needs a hash string which is made using post arguments
:param args: Arguments of the flickr request
:type args: list of sets
:return: api_sig, ex: ('api_sig', 'abcdefg')
:rtype: tuple
|
[
"Flickr",
"API",
"need",
"a",
"hash",
"string",
"which",
"made",
"using",
"post",
"arguments"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L99-L111
|
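The signing scheme reduces to `md5(secret + k1 + v1 + k2 + v2 + ...)` over the sorted pairs; a self-contained illustration with made-up values:

import hashlib

api_secret = 'SECRET'  # placeholder
args = [('api_key', 'KEY'), ('format', 'json')]
sig_base = api_secret + ''.join(k + v for k, v in args)
# sig_base == 'SECRETapi_keyKEYformatjson'
api_sig = hashlib.md5(sig_base.encode('utf-8')).hexdigest()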
240,581
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
create_dir
|
def create_dir(path):
"""Create dir with the path
:param path: The path to be created
:type path: str
"""
if os.path.exists(path):
if not os.path.isdir(path):
logger.error('%s is not a directory', path)
sys.exit(1)
else: # ignore
pass
else:
os.makedirs(path)
logger.info('Create dir: %s', path)
|
python
|
def create_dir(path):
"""Create dir with the path
:param path: The path to be created
:type path: str
"""
if os.path.exists(path):
if not os.path.isdir(path):
logger.error('%s is not a directory', path)
sys.exit(1)
else: # ignore
pass
else:
os.makedirs(path)
logger.info('Create dir: %s', path)
|
[
"def",
"create_dir",
"(",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"logger",
".",
"error",
"(",
"'%s is not a directory'",
",",
"path",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"# ignore",
"pass",
"else",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"logger",
".",
"info",
"(",
"'Create dir: %s'",
",",
"path",
")"
] |
Create dir with the path
:param path: The path to be created
:type path: str
|
[
"Create",
"dir",
"with",
"the",
"path"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L114-L128
|
240,582
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
get_photos_info
|
def get_photos_info(photoset_id):
"""Request the photos information with the photoset id
:param photoset_id: The photoset id of flickr
:type photoset_id: str
:return: photos information
:rtype: list
"""
args = _get_request_args(
'flickr.photosets.getPhotos',
photoset_id=photoset_id
)
resp = requests.post(API_URL, data=args)
resp_json = json.loads(resp.text.encode('utf-8'))
logger.debug(resp_json)
photos = resp_json['photoset']['photo']
return photos
|
python
|
def get_photos_info(photoset_id):
"""Request the photos information with the photoset id
:param photoset_id: The photoset id of flickr
:type photoset_id: str
:return: photos information
:rtype: list
"""
args = _get_request_args(
'flickr.photosets.getPhotos',
photoset_id=photoset_id
)
resp = requests.post(API_URL, data=args)
resp_json = json.loads(resp.text.encode('utf-8'))
logger.debug(resp_json)
photos = resp_json['photoset']['photo']
return photos
|
[
"def",
"get_photos_info",
"(",
"photoset_id",
")",
":",
"args",
"=",
"_get_request_args",
"(",
"'flickr.photosets.getPhotos'",
",",
"photoset_id",
"=",
"photoset_id",
")",
"resp",
"=",
"requests",
".",
"post",
"(",
"API_URL",
",",
"data",
"=",
"args",
")",
"resp_json",
"=",
"json",
".",
"loads",
"(",
"resp",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"logger",
".",
"debug",
"(",
"resp_json",
")",
"photos",
"=",
"resp_json",
"[",
"'photoset'",
"]",
"[",
"'photo'",
"]",
"return",
"photos"
] |
Request the photos information with the photoset id
:param photoset_id: The photoset id of flickr
:type photoset_id: str
:return: photos information
:rtype: list
|
[
"Request",
"the",
"photos",
"information",
"with",
"the",
"photoset",
"id"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L131-L147
|
240,583
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
single_download_photos
|
def single_download_photos(photos):
"""Use single process to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
global counter
counter = len(photos)
for photo in photos:
download_photo(photo)
|
python
|
def single_download_photos(photos):
"""Use single process to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
global counter
counter = len(photos)
for photo in photos:
download_photo(photo)
|
[
"def",
"single_download_photos",
"(",
"photos",
")",
":",
"global",
"counter",
"counter",
"=",
"len",
"(",
"photos",
")",
"for",
"photo",
"in",
"photos",
":",
"download_photo",
"(",
"photo",
")"
] |
Use single process to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
|
[
"Use",
"single",
"process",
"to",
"download",
"photos"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L224-L233
|
240,584
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
multithread_download_photos
|
def multithread_download_photos(photos):
"""Use multiple threads to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
from concurrent import futures
global counter
counter = len(photos)
cpu_num = multiprocessing.cpu_count()
with futures.ThreadPoolExecutor(max_workers=cpu_num) as executor:
for photo in photos:
executor.submit(download_photo, photo)
|
python
|
def multithread_download_photos(photos):
"""Use multiple threads to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
"""
from concurrent import futures
global counter
counter = len(photos)
cpu_num = multiprocessing.cpu_count()
with futures.ThreadPoolExecutor(max_workers=cpu_num) as executor:
for photo in photos:
executor.submit(download_photo, photo)
|
[
"def",
"multithread_download_photos",
"(",
"photos",
")",
":",
"from",
"concurrent",
"import",
"futures",
"global",
"counter",
"counter",
"=",
"len",
"(",
"photos",
")",
"cpu_num",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"with",
"futures",
".",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"cpu_num",
")",
"as",
"executor",
":",
"for",
"photo",
"in",
"photos",
":",
"executor",
".",
"submit",
"(",
"download_photo",
",",
"photo",
")"
] |
Use multiple threads to download photos
:param photos: The photos to be downloaded
:type photos: list of dicts
|
[
"Use",
"multiple",
"threads",
"to",
"download",
"photos"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L256-L268
|
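Sizing the pool by `cpu_count()` is a heuristic; downloads are I/O-bound, so more workers are often fine. A hedged variant that also surfaces worker exceptions, which `executor.submit` otherwise hides unless the returned future is inspected (it assumes `download_photo` from this module is in scope):

from concurrent import futures

def download_all(photos, workers=8):
    with futures.ThreadPoolExecutor(max_workers=workers) as executor:
        futs = [executor.submit(download_photo, photo) for photo in photos]
        for fut in futures.as_completed(futs):
            fut.result()  # re-raises any exception from a worker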
240,585
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
init_logger
|
def init_logger():
"""Initialize the logger and set its format
"""
formatter = logging.Formatter('%(levelname)s: %(message)s')
console = logging.StreamHandler(stream=sys.stdout)
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
|
python
|
def init_logger():
"""Initialize the logger and set its format
"""
formatter = logging.Formatter('%(levelname)s: %(message)s')
console = logging.StreamHandler(stream=sys.stdout)
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
|
[
"def",
"init_logger",
"(",
")",
":",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(levelname)s: %(message)s'",
")",
"console",
"=",
"logging",
".",
"StreamHandler",
"(",
"stream",
"=",
"sys",
".",
"stdout",
")",
"console",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"console",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"console",
")"
] |
Initialize the logger and set its format
|
[
"Initialize",
"the",
"logger",
"and",
"set",
"its",
"format"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L271-L278
|
240,586
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
_gevent_patch
|
def _gevent_patch():
"""Patch the modules with gevent
:return: Default is GEVENT. If gevent is not supported, return MULTITHREAD
:rtype: int
"""
try:
assert gevent
assert grequests
except NameError:
logger.warn('gevent not exist, fallback to multiprocess...')
return MULTITHREAD
else:
monkey.patch_all() # Must patch before get_photos_info
return GEVENT
|
python
|
def _gevent_patch():
"""Patch the modules with gevent
:return: Default is GEVENT. If gevent is not supported, return MULTITHREAD
:rtype: int
"""
try:
assert gevent
assert grequests
except NameError:
logger.warn('gevent not exist, fallback to multiprocess...')
return MULTITHREAD
else:
monkey.patch_all() # Must patch before get_photos_info
return GEVENT
|
[
"def",
"_gevent_patch",
"(",
")",
":",
"try",
":",
"assert",
"gevent",
"assert",
"grequests",
"except",
"NameError",
":",
"logger",
".",
"warn",
"(",
"'gevent not exist, fallback to multiprocess...'",
")",
"return",
"MULTITHREAD",
"else",
":",
"monkey",
".",
"patch_all",
"(",
")",
"# Must patch before get_photos_info",
"return",
"GEVENT"
] |
Patch the modules with gevent
:return: Default is GEVENT. If gevent is not supported, return MULTITHREAD
:rtype: int
|
[
"Patch",
"the",
"modules",
"with",
"gevent"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L349-L363
|
240,587
|
carlcarl/grabflickr
|
grabflickr/grabflickr.py
|
main
|
def main():
"""The main procedure
"""
init_logger()
args = _parse_cli_args()
if args.u:
enter_api_key()
return
if args.O == GEVENT:
args.O = _gevent_patch()
set_image_size_mode(args.s)
photoset_id = args.g
global directory
directory = args.d if args.d else photoset_id
read_config()
photos = get_photos_info(photoset_id)
create_dir(directory)
if args.O == SINGLE_PROCESS:
single_download_photos(photos)
elif args.O == GEVENT:
event_download_photos(photos)
elif args.O == MULTITHREAD:
multithread_download_photos(photos)
else:
logger.error('Unknown Error')
|
python
|
def main():
"""The main procedure
"""
init_logger()
args = _parse_cli_args()
if args.u:
enter_api_key()
return
if args.O == GEVENT:
args.O = _gevent_patch()
set_image_size_mode(args.s)
photoset_id = args.g
global directory
directory = args.d if args.d else photoset_id
read_config()
photos = get_photos_info(photoset_id)
create_dir(directory)
if args.O == SINGLE_PROCESS:
single_download_photos(photos)
elif args.O == GEVENT:
event_download_photos(photos)
elif args.O == MULTITHREAD:
multithread_download_photos(photos)
else:
logger.error('Unknown Error')
|
[
"def",
"main",
"(",
")",
":",
"init_logger",
"(",
")",
"args",
"=",
"_parse_cli_args",
"(",
")",
"if",
"args",
".",
"u",
":",
"enter_api_key",
"(",
")",
"return",
"if",
"args",
".",
"O",
"==",
"GEVENT",
":",
"args",
".",
"O",
"=",
"_gevent_patch",
"(",
")",
"set_image_size_mode",
"(",
"args",
".",
"s",
")",
"photoset_id",
"=",
"args",
".",
"g",
"global",
"directory",
"directory",
"=",
"args",
".",
"d",
"if",
"args",
".",
"d",
"else",
"photoset_id",
"read_config",
"(",
")",
"photos",
"=",
"get_photos_info",
"(",
"photoset_id",
")",
"create_dir",
"(",
"directory",
")",
"if",
"args",
".",
"O",
"==",
"SINGLE_PROCESS",
":",
"single_download_photos",
"(",
"photos",
")",
"elif",
"args",
".",
"O",
"==",
"GEVENT",
":",
"event_download_photos",
"(",
"photos",
")",
"elif",
"args",
".",
"O",
"==",
"MULTITHREAD",
":",
"multithread_download_photos",
"(",
"photos",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Unknown Error'",
")"
] |
The main procedure
|
[
"The",
"main",
"procedure"
] |
e9cb2365de80c1819cfd5083c032d0d985f3c614
|
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L366-L396
|
240,588
|
bobbyesh/word-fencer
|
wordfencer/parser.py
|
is_cjk_punctuation
|
def is_cjk_punctuation(char):
"""Returns true if char is a punctuation mark in a CJK language."""
lower = int('0x3000', 16)
higher = int('0x300F', 16)
return ord(char) >= lower and ord(char) <= higher
|
python
|
def is_cjk_punctuation(char):
"""Returns true if char is a punctuation mark in a CJK language."""
lower = int('0x3000', 16)
higher = int('0x300F', 16)
return ord(char) >= lower and ord(char) <= higher
|
[
"def",
"is_cjk_punctuation",
"(",
"char",
")",
":",
"lower",
"=",
"int",
"(",
"'0x3000'",
",",
"16",
")",
"higher",
"=",
"int",
"(",
"'0x300F'",
",",
"16",
")",
"return",
"ord",
"(",
"char",
")",
">=",
"lower",
"and",
"ord",
"(",
"char",
")",
"<=",
"higher"
] |
Returns true if char is a punctuation mark in a CJK language.
|
[
"Returns",
"true",
"if",
"char",
"is",
"a",
"punctuation",
"mark",
"in",
"a",
"CJK",
"language",
"."
] |
267b9770611bf9f19ec5dd6e5190ef185ed0bfb4
|
https://github.com/bobbyesh/word-fencer/blob/267b9770611bf9f19ec5dd6e5190ef185ed0bfb4/wordfencer/parser.py#L34-L38
|
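The range checked is U+3000 through U+300F, which covers the ideographic space, 、 and 。, and the 〈〉《》「」『』 bracket pairs:

is_cjk_punctuation('。')   # True: U+3002, ideographic full stop
is_cjk_punctuation('、')   # True: U+3001, ideographic comma
is_cjk_punctuation('中')   # False: U+4E2D, an ordinary ideograph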
240,589
|
bobbyesh/word-fencer
|
wordfencer/parser.py
|
Parser.force_populate
|
def force_populate(self):
"""
Populates the parser with the entire contents of the
word reference file.
"""
if not os.path.exists(self.ref):
raise FileNotFoundError("The reference file path '{}' does not exist.".format(self.ref))
with open(self.ref, 'r') as f:
for word in f:
word = word.strip('\n')
self.db.add(word)
self.populated = True
|
python
|
def force_populate(self):
"""
Populates the parser with the entire contents of the
word reference file.
"""
if not os.path.exists(self.ref):
raise FileNotFoundError("The reference file path '{}' does not exist.".format(self.ref))
with open(self.ref, 'r') as f:
for word in f:
word = word.strip('\n')
self.db.add(word)
self.populated = True
|
[
"def",
"force_populate",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"ref",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"The reference file path '{}' does not exists.\"",
".",
"format",
"(",
"self",
".",
"ref",
")",
")",
"with",
"open",
"(",
"self",
".",
"ref",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"word",
"in",
"f",
":",
"word",
"=",
"word",
".",
"strip",
"(",
"'\\n'",
")",
"self",
".",
"db",
".",
"add",
"(",
"word",
")",
"self",
".",
"populated",
"=",
"True"
] |
Populates the parser with the entire contents of the
word reference file.
|
[
"Populates",
"the",
"parser",
"with",
"the",
"entire",
"contents",
"of",
"the",
"word",
"reference",
"file",
"."
] |
267b9770611bf9f19ec5dd6e5190ef185ed0bfb4
|
https://github.com/bobbyesh/word-fencer/blob/267b9770611bf9f19ec5dd6e5190ef185ed0bfb4/wordfencer/parser.py#L156-L167
|
240,590
|
emencia/emencia-django-countries
|
emencia/django/countries/managers.py
|
CountryManager.leveled
|
def leveled(self):
"""Return all countries with a level set"""
# Compatibility support for Django<1.6
safe_get_queryset = (self.get_query_set if hasattr(self, 'get_query_set') else self.get_queryset)
return safe_get_queryset().exclude(level=0)
|
python
|
def leveled(self):
"""Return all countries with a level set"""
# Compatibility support for Django<1.6
safe_get_queryset = (self.get_query_set if hasattr(self, 'get_query_set') else self.get_queryset)
return safe_get_queryset().exclude(level=0)
|
[
"def",
"leveled",
"(",
"self",
")",
":",
"# Compatibility support for Django<1.6",
"safe_get_queryset",
"=",
"(",
"self",
".",
"get_query_set",
"if",
"hasattr",
"(",
"self",
",",
"'get_query_set'",
")",
"else",
"self",
".",
"get_queryset",
")",
"return",
"safe_get_queryset",
".",
"exclude",
"(",
"level",
"=",
"0",
")"
] |
Return all countries with a level set
|
[
"Return",
"all",
"countries",
"with",
"a",
"level",
"set"
] |
5ae8719f4b43caeca2c69c9e37e6d6bc5d7b0290
|
https://github.com/emencia/emencia-django-countries/blob/5ae8719f4b43caeca2c69c9e37e6d6bc5d7b0290/emencia/django/countries/managers.py#L7-L13
|
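The `hasattr` check selects between the pre- and post-Django-1.6 spellings of the queryset accessor; an equivalent, arguably clearer sketch of the same compatibility shim:

def leveled(self):
    """Return all countries with a level set"""
    get_qs = getattr(self, 'get_query_set', None) or self.get_queryset
    return get_qs().exclude(level=0)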
240,591
|
kenlowrie/pylib
|
kenl380/pylib/pylib.py
|
popd
|
def popd(pop_all=False, throw_if_dir_invalid=True):
"""Restore current working directory to previous directory.
The previous directory is whatever it was when :py:meth:`pushd()` was
*last* called. :py:meth:`pushd()` creates a stack, so each call to popd()
simply sets the CWD back to what it was on the prior pushd() call.
Parameters
----------
pop_all : bool, optional
When `pop_all` is True, sets the CWD to the state when pushd() was
first called. Does NOT call os.getcwd() for intervening paths, only
the final path.
throw_if_dir_invalid : bool, optional
Whether or not to pass back up any exception raised by chdir().
Default is True.
Returns
-------
True : bool
Success
False : bool
Failure
Raises
------
OSError
If `throw_if_dir_invalid` is True and chdir raises an exception,
this function will chain the same exception as chdir, typically
OSError
ValueError
If popd() is called on an empty stack; i.e. before :py:meth:`pushd()`
has been called.
Notes
-----
This method and its counterpart :py:meth:`pushd` are **not** thread safe!
"""
global _pushdstack
from os import chdir
if len(_pushdstack) == 0:
raise ValueError("popd() called on an empty stack.")
if pop_all:
while( len(_pushdstack) > 1):
_pushdstack.pop()
try:
chdir(_pushdstack.pop())
err = 0
except OSError:
if throw_if_dir_invalid:
raise
err = 1
return err == 0
|
python
|
def popd(pop_all=False, throw_if_dir_invalid=True):
"""Restore current working directory to previous directory.
The previous directory is whatever it was when :py:meth:`pushd()` was
*last* called. :py:meth:`pushd()` creates a stack, so each call to popd()
simply sets the CWD back to what it was on the prior pushd() call.
Parameters
----------
pop_all : bool, optional
When `pop_all` is True, sets the CWD to the state when pushd() was
first called. Does NOT call os.getcwd() for intervening paths, only
the final path.
throw_if_dir_invalid : bool, optional
Whether or not to pass back up any exception raised by chdir().
Default is True.
Returns
-------
True : bool
Success
False : bool
Failure
Raises
------
OSError
If `throw_if_dir_invalid` is True and chdir raises an exception,
this function will chain the same exception as chdir, typically
OSError
ValueError
If popd() is called on an empty stack; i.e. before :py:meth:`pushd()`
has been called.
Notes
-----
This method and its counterpart :py:meth:`pushd` are **not** thread safe!
"""
global _pushdstack
from os import chdir
if len(_pushdstack) == 0:
raise ValueError("popd() called on an empty stack.")
if pop_all:
while( len(_pushdstack) > 1):
_pushdstack.pop()
try:
chdir(_pushdstack.pop())
err = 0
except OSError:
if throw_if_dir_invalid:
raise
err = 1
return err == 0
|
[
"def",
"popd",
"(",
"pop_all",
"=",
"False",
",",
"throw_if_dir_invalid",
"=",
"True",
")",
":",
"global",
"_pushdstack",
"from",
"os",
"import",
"chdir",
"if",
"len",
"(",
"_pushdstack",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"popd() called on an empty stack.\"",
")",
"if",
"pop_all",
":",
"while",
"(",
"len",
"(",
"_pushdstack",
")",
">",
"1",
")",
":",
"_pushdstack",
".",
"pop",
"(",
")",
"try",
":",
"chdir",
"(",
"_pushdstack",
".",
"pop",
"(",
")",
")",
"err",
"=",
"0",
"except",
"OSError",
":",
"if",
"throw_if_dir_invalid",
":",
"raise",
"err",
"=",
"1",
"return",
"err",
"==",
"0"
] |
Restore current working directory to previous directory.
The previous directory is whatever it was when :py:meth:`pushd()` was
*last* called. :py:meth:`pushd()` creates a stack, so each call to popd()
simply sets the CWD back to what it was on the prior pushd() call.
Parameters
----------
pop_all : bool, optional
When `pop_all` is True, sets the CWD to the state when pushd() was
first called. Does NOT call os.getcwd() for intervening paths, only
the final path.
throw_if_dir_invalid : bool, optional
Whether or not to pass back up any exception raised by chdir().
Default is True.
Returns
-------
True : bool
Success
False : bool
Failure
Raises
------
OSError
If `throw_if_dir_invalid` is True and chdir raises an exception,
this function will chain the same exception as chdir, typically
OSError
ValueError
If popd() is called on an empty stack; i.e. before :py:meth:`pushd()`
has been called.
Notes
-----
This method and its counterpart :py:meth:`pushd` are **not** thread safe!
|
[
"Restore",
"current",
"working",
"directory",
"to",
"previous",
"directory",
"."
] |
3c288889ab27fc29b065016853faecd0a14c4224
|
https://github.com/kenlowrie/pylib/blob/3c288889ab27fc29b065016853faecd0a14c4224/kenl380/pylib/pylib.py#L462-L520
|
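A typical usage sketch; it assumes the module's `pushd()` counterpart, which the docstring references but this record does not show:

import os

pushd('/tmp')        # remember the old CWD, switch to /tmp
print(os.getcwd())   # '/tmp' (or the resolved equivalent)
popd()               # True: back to the previous directory
popd()               # raises ValueError: the stack is now empty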
240,592
|
kenlowrie/pylib
|
kenl380/pylib/pylib.py
|
context.pyVersionStr
|
def pyVersionStr(self):
"""Version of Python running my script
Returns
-------
str
A descriptive string containing the version of Python running
this script.
"""
from sys import version_info
return "Python Interpreter Version: {}.{}.{}".format(version_info.major,
version_info.minor,
version_info.micro)
|
python
|
def pyVersionStr(self):
"""Version of Python running my script
Returns
-------
str
A descriptive string containing the version of Python running
this script.
"""
from sys import version_info
return "Python Interpreter Version: {}.{}.{}".format(version_info.major,
version_info.minor,
version_info.micro)
|
[
"def",
"pyVersionStr",
"(",
"self",
")",
":",
"from",
"sys",
"import",
"version_info",
"return",
"\"Python Interpreter Version: {}.{}.{}\"",
".",
"format",
"(",
"version_info",
".",
"major",
",",
"version_info",
".",
"minor",
",",
"version_info",
".",
"micro",
")"
] |
Version of Python running my script
Returns
-------
str
A descriptive string containing the version of Python running
this script.
|
[
"Version",
"of",
"Python",
"running",
"my",
"script"
] |
3c288889ab27fc29b065016853faecd0a14c4224
|
https://github.com/kenlowrie/pylib/blob/3c288889ab27fc29b065016853faecd0a14c4224/kenl380/pylib/pylib.py#L123-L136
|
240,593
|
kenlowrie/pylib
|
kenl380/pylib/pylib.py
|
ntpx.all
|
def all(self):
"""Returns a tuple containing all elements of the object
This method returns all elements of the path in the form of a tuple.
e.g.: `(abs_path, drive_letter, path_only, rootname, extension,
filesize, time_in_seconds)`.
Returns
-------
tuple
All elements of the path associated with this object as a tuple.
Notes
-----
If path points to a non-existant file, the size and datetime will
be returned as None (NoneType).
"""
return (self._full, self._driv, self._path, self._name, self._ext, self._size, self._time)
|
python
|
def all(self):
"""Returns a tuple containing all elements of the object
This method returns all elements of the path in the form of a tuple.
e.g.: `(abs_path, drive_letter, path_only, rootname, extension,
filesize, time_in_seconds)`.
Returns
-------
tuple
All elements of the path associated with this object as a tuple.
Notes
-----
If path points to a non-existent file, the size and datetime will
be returned as None (NoneType).
"""
return (self._full, self._driv, self._path, self._name, self._ext, self._size, self._time)
|
[
"def",
"all",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"_full",
",",
"self",
".",
"_driv",
",",
"self",
".",
"_path",
",",
"self",
".",
"_name",
",",
"self",
".",
"_ext",
",",
"self",
".",
"_size",
",",
"self",
".",
"_time",
")"
] |
Returns a tuple containing all elements of the object
This method returns all elements of the path in the form of a tuple.
e.g.: `(abs_path, drive_letter, path_only, rootname, extension,
filesize, time_in_seconds)`.
Returns
-------
tuple
All elements of the path associated with this object as a tuple.
Notes
-----
If path points to a non-existent file, the size and datetime will
be returned as None (NoneType).
|
[
"Returns",
"a",
"tuple",
"containing",
"all",
"elements",
"of",
"the",
"object"
] |
3c288889ab27fc29b065016853faecd0a14c4224
|
https://github.com/kenlowrie/pylib/blob/3c288889ab27fc29b065016853faecd0a14c4224/kenl380/pylib/pylib.py#L237-L255
|
240,594
|
kenlowrie/pylib
|
kenl380/pylib/pylib.py
|
ntpx.format
|
def format(self, fmt):
"""Returns string representing the items specified in the format string
The format string can contain:
.. code::
d - drive letter
p - path
n - name
x - extension
z - file size
t - file time in seconds
And, you can string them together, e.g. `dpnx` returns the fully
qualified name.
On platforms like Unix, where drive letter doesn't make sense, it's simply
ignored when used in a format string, making it easy to construct fully
qualified path names in an os independent manner.
Parameters
----------
fmt : str
A string representing the elements you want returned.
Returns
-------
str
A string containing the elements of the path requested in `fmt`
"""
val = ''
for x in fmt:
if x == 'd':
val += self._driv
elif x == 'p':
val += self._path
elif x == 'n':
val += self._name
elif x == 'x':
val += self._ext
elif x == 'z':
if self._size != None: val += str(self._size)
elif x == 't':
if self._time != None: val += str(self._time)
return val
|
python
|
def format(self, fmt):
"""Returns string representing the items specified in the format string
The format string can contain:
.. code::
d - drive letter
p - path
n - name
x - extension
z - file size
t - file time in seconds
And, you can string them together, e.g. `dpnx` returns the fully
qualified name.
On platforms like Unix, where drive letter doesn't make sense, it's simply
ignored when used in a format string, making it easy to construct fully
qualified path names in an os independent manner.
Parameters
----------
fmt : str
A string representing the elements you want returned.
Returns
-------
str
A string containing the elements of the path requested in `fmt`
"""
val = ''
for x in fmt:
if x == 'd':
val += self._driv
elif x == 'p':
val += self._path
elif x == 'n':
val += self._name
elif x == 'x':
val += self._ext
elif x == 'z':
if self._size != None: val += str(self._size)
elif x == 't':
if self._time != None: val += str(self._time)
return val
|
[
"def",
"format",
"(",
"self",
",",
"fmt",
")",
":",
"val",
"=",
"''",
"for",
"x",
"in",
"fmt",
":",
"if",
"x",
"==",
"'d'",
":",
"val",
"+=",
"self",
".",
"_driv",
"elif",
"x",
"==",
"'p'",
":",
"val",
"+=",
"self",
".",
"_path",
"elif",
"x",
"==",
"'n'",
":",
"val",
"+=",
"self",
".",
"_name",
"elif",
"x",
"==",
"'x'",
":",
"val",
"+=",
"self",
".",
"_ext",
"elif",
"x",
"==",
"'z'",
":",
"if",
"self",
".",
"_size",
"!=",
"None",
":",
"val",
"+=",
"str",
"(",
"self",
".",
"_size",
")",
"elif",
"x",
"==",
"'t'",
":",
"if",
"self",
".",
"_time",
"!=",
"None",
":",
"val",
"+=",
"str",
"(",
"self",
".",
"_time",
")",
"return",
"val"
] |
Returns string representing the items specified in the format string
The format string can contain:
.. code::
d - drive letter
p - path
n - name
x - extension
z - file size
t - file time in seconds
And, you can string them together, e.g. `dpnx` returns the fully
qualified name.
On platforms like Unix, where drive letter doesn't make sense, it's simply
ignored when used in a format string, making it easy to construct fully
qualified path names in an os independent manner.
Parameters
----------
fmt : str
A string representing the elements you want returned.
Returns
-------
str
A string containing the elements of the path requested in `fmt`
|
[
"Returns",
"string",
"representing",
"the",
"items",
"specified",
"in",
"the",
"format",
"string"
] |
3c288889ab27fc29b065016853faecd0a14c4224
|
https://github.com/kenlowrie/pylib/blob/3c288889ab27fc29b065016853faecd0a14c4224/kenl380/pylib/pylib.py#L257-L309
|
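A hedged usage sketch; it assumes an `ntpx(path)` constructor, which this record does not show:

p = ntpx('/home/ken/notes.txt')
p.format('dpnx')   # '/home/ken/notes.txt' (drive is '' on Unix)
p.format('nx')     # 'notes.txt'
p.format('z')      # the file size as a string, '' if the file is missing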
240,595
|
lvh/maxims
|
maxims/caching.py
|
cached
|
def cached(attr):
"""
In-memory caching for a nullary callable.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self):
try:
return getattr(self, attr)
except AttributeError:
value = f(self)
setattr(self, attr, value)
return value
return decorated
return decorator
|
python
|
def cached(attr):
"""
In-memory caching for a nullary callable.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self):
try:
return getattr(self, attr)
except AttributeError:
value = f(self)
setattr(self, attr, value)
return value
return decorated
return decorator
|
[
"def",
"cached",
"(",
"attr",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"decorated",
"(",
"self",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"self",
",",
"attr",
")",
"except",
"AttributeError",
":",
"value",
"=",
"f",
"(",
"self",
")",
"setattr",
"(",
"self",
",",
"attr",
",",
"value",
")",
"return",
"value",
"return",
"decorated",
"return",
"decorator"
] |
In-memory caching for a nullary callable.
|
[
"In",
"-",
"memory",
"caching",
"for",
"a",
"nullary",
"callable",
"."
] |
5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623
|
https://github.com/lvh/maxims/blob/5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623/maxims/caching.py#L7-L22
|
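Typical use of the decorator: the first call computes and stores the value on the named attribute, and later calls return it straight from the cache:

class Report(object):
    @cached('_total')
    def total(self):
        print('computing...')
        return 42

r = Report()
r.total()   # prints 'computing...', returns 42 and sets r._total
r.total()   # returns 42 from r._total without recomputing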
240,596
|
OpenGov/carpenter
|
carpenter/blocks/cellanalyzer.py
|
check_cell_type
|
def check_cell_type(cell, cell_type):
'''
Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match.
'''
if cell_type == None or cell_type == type(None):
return cell == None or (isinstance(cell, basestring) and not cell)
else:
return isinstance(cell, cell_type)
|
python
|
def check_cell_type(cell, cell_type):
'''
Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match.
'''
if cell_type == None or cell_type == type(None):
return cell == None or (isinstance(cell, basestring) and not cell)
else:
return isinstance(cell, cell_type)
|
[
"def",
"check_cell_type",
"(",
"cell",
",",
"cell_type",
")",
":",
"if",
"cell_type",
"==",
"None",
"or",
"cell_type",
"==",
"type",
"(",
"None",
")",
":",
"return",
"cell",
"==",
"None",
"or",
"(",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
"and",
"not",
"cell",
")",
"else",
":",
"return",
"isinstance",
"(",
"cell",
",",
"cell_type",
")"
] |
Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match.
|
[
"Checks",
"the",
"cell",
"type",
"to",
"see",
"if",
"it",
"represents",
"the",
"cell_type",
"passed",
"in",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/cellanalyzer.py#L44-L54
|
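A few calls make the empty-match rule of check_cell_type concrete. The original targets Python 2 (hence basestring); the sketch below substitutes str so it runs under Python 3, which is an adaptation rather than the source's exact code.

def check_cell_type(cell, cell_type):
    # Python 3 adaptation: str stands in for the original's basestring.
    if cell_type is None or cell_type == type(None):
        return cell is None or (isinstance(cell, str) and not cell)
    return isinstance(cell, cell_type)

check_cell_type(None, None)   # True  - None matches the empty case
check_cell_type('', None)     # True  - a blank string also counts as empty
check_cell_type(0, None)      # False - zero is a real value, not empty
check_cell_type(3.5, float)   # True  - otherwise a plain isinstance() check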
240,597
|
OpenGov/carpenter
|
carpenter/blocks/cellanalyzer.py
|
auto_convert_cell_no_flags
|
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
'''
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
This version of conversion doesn't flag changes nor store
cell units.
Args:
units: The dictionary holder for cell units.
parens_as_neg: Converts numerics surrounded by parens to
negative values
'''
units = units if units != None else {}
return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0,
flags={}, units=units, parens_as_neg=parens_as_neg)
|
python
|
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
'''
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
This version of conversion doesn't flag changes nor store
cell units.
Args:
units: The dictionary holder for cell units.
parens_as_neg: Converts numerics surrounded by parens to
negative values
'''
units = units if units != None else {}
return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0,
flags={}, units=units, parens_as_neg=parens_as_neg)
|
[
"def",
"auto_convert_cell_no_flags",
"(",
"cell",
",",
"units",
"=",
"None",
",",
"parens_as_neg",
"=",
"True",
")",
":",
"units",
"=",
"units",
"if",
"units",
"!=",
"None",
"else",
"{",
"}",
"return",
"auto_convert_cell",
"(",
"flagable",
"=",
"Flagable",
"(",
")",
",",
"cell",
"=",
"cell",
",",
"position",
"=",
"None",
",",
"worksheet",
"=",
"0",
",",
"flags",
"=",
"{",
"}",
",",
"units",
"=",
"units",
",",
"parens_as_neg",
"=",
"parens_as_neg",
")"
] |
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
This version of conversion doesn't flag changes nor store
cell units.
Args:
units: The dictionary holder for cell units.
parens_as_neg: Converts numerics surrounded by parens to
negative values
|
[
"Performs",
"a",
"first",
"step",
"conversion",
"of",
"the",
"cell",
"to",
"check",
"it",
"s",
"type",
"or",
"try",
"to",
"convert",
"if",
"a",
"valid",
"conversion",
"exists",
".",
"This",
"version",
"of",
"conversion",
"doesn",
"t",
"flag",
"changes",
"nor",
"store",
"cell",
"units",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/cellanalyzer.py#L56-L70
|
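The `units = units if units != None else {}` line above is the standard guard against Python's shared mutable default arguments. A minimal demonstration of why the guard matters (bad and good are invented names, not part of the carpenter API):

def bad(units={}):                  # one dict shared across every call
    units['n'] = units.get('n', 0) + 1
    return units

def good(units=None):               # the pattern used above
    units = units if units is not None else {}
    units['n'] = units.get('n', 0) + 1
    return units

bad()    # {'n': 1}
bad()    # {'n': 2}  <- state leaked from the previous call
good()   # {'n': 1}
good()   # {'n': 1}  <- fresh dict each time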
240,598
|
OpenGov/carpenter
|
carpenter/blocks/cellanalyzer.py
|
auto_convert_cell
|
def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True):
'''
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
'''
conversion = cell
# Is an numeric?
if isinstance(cell, (int, float)):
pass
# Is a string?
elif isinstance(cell, basestring):
# Blank cell?
if not cell:
conversion = None
else:
conversion = auto_convert_string_cell(flagable, cell, position, worksheet,
flags, units, parens_as_neg=parens_as_neg)
# Is something else?? Convert to string
elif cell != None:
# Since we shouldn't get this event from most file types,
# make this a warning level conversion flag
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['unknown-to-string'])
conversion = str(cell)
# Empty cell?
if not conversion:
conversion = None
else:
# Otherwise we have an empty cell
pass
return conversion
|
python
|
def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True):
'''
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
'''
conversion = cell
# Is an numeric?
if isinstance(cell, (int, float)):
pass
# Is a string?
elif isinstance(cell, basestring):
# Blank cell?
if not cell:
conversion = None
else:
conversion = auto_convert_string_cell(flagable, cell, position, worksheet,
flags, units, parens_as_neg=parens_as_neg)
# Is something else?? Convert to string
elif cell != None:
# Since we shouldn't get this event from most file types,
# make this a warning level conversion flag
flagable.flag_change(flags, 'warning', position, worksheet,
flagable.FLAGS['unknown-to-string'])
conversion = str(cell)
# Empty cell?
if not conversion:
conversion = None
else:
# Otherwise we have an empty cell
pass
return conversion
|
[
"def",
"auto_convert_cell",
"(",
"flagable",
",",
"cell",
",",
"position",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"parens_as_neg",
"=",
"True",
")",
":",
"conversion",
"=",
"cell",
"# Is an numeric?",
"if",
"isinstance",
"(",
"cell",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"pass",
"# Is a string?",
"elif",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
":",
"# Blank cell?",
"if",
"not",
"cell",
":",
"conversion",
"=",
"None",
"else",
":",
"conversion",
"=",
"auto_convert_string_cell",
"(",
"flagable",
",",
"cell",
",",
"position",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"parens_as_neg",
"=",
"parens_as_neg",
")",
"# Is something else?? Convert to string",
"elif",
"cell",
"!=",
"None",
":",
"# Since we shouldn't get this event from most file types,",
"# make this a warning level conversion flag",
"flagable",
".",
"flag_change",
"(",
"flags",
",",
"'warning'",
",",
"position",
",",
"worksheet",
",",
"flagable",
".",
"FLAGS",
"[",
"'unknown-to-string'",
"]",
")",
"conversion",
"=",
"str",
"(",
"cell",
")",
"# Empty cell?",
"if",
"not",
"conversion",
":",
"conversion",
"=",
"None",
"else",
":",
"# Otherwise we have an empty cell",
"pass",
"return",
"conversion"
] |
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
|
[
"Performs",
"a",
"first",
"step",
"conversion",
"of",
"the",
"cell",
"to",
"check",
"it",
"s",
"type",
"or",
"try",
"to",
"convert",
"if",
"a",
"valid",
"conversion",
"exists",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/cellanalyzer.py#L72-L107
|
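The dispatch order above (numeric pass-through, string delegation, unknown-to-string, empty-to-None) can be summarized in a dependency-free sketch. Flagging and unit handling are omitted, and convert/convert_string are illustrative names rather than the carpenter API:

def convert(cell):
    if isinstance(cell, (int, float)):     # numerics pass through untouched
        return cell
    if isinstance(cell, str):              # basestring in the Py2 original
        return convert_string(cell) if cell else None
    if cell is not None:                   # unknown type: stringify
        return str(cell) or None           # (the original flags a warning here)
    return None                            # already an empty cell

def convert_string(s):
    # Crude numeric check; negatives and separators omitted for brevity.
    s = s.strip()
    return float(s) if s.replace('.', '', 1).isdigit() else s

convert(3)          # 3
convert('')         # None
convert('2.5')      # 2.5
convert(object())   # falls back to str(): '<object object at 0x...>'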
240,599
|
OpenGov/carpenter
|
carpenter/blocks/cellanalyzer.py
|
auto_convert_string_cell
|
def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags,
units, parens_as_neg=True):
'''
Handles the string case of cell and attempts auto-conversion
for auto_convert_cell.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
'''
conversion = cell_str.strip()
# Wrapped?
if re.search(allregex.control_wrapping_regex, cell_str):
# Drop the wrapping characters
stripped_cell = cell_str.strip()
mod_cell_str = stripped_cell[1:][:-1].strip()
neg_mult = False
# If the wrapping characters are '(' and ')' and the interior is a number,
# then the number should be interpreted as a negative value
if (stripped_cell[0] == '(' and stripped_cell[-1] == ')' and
re.search(allregex.contains_numerical_regex, mod_cell_str)):
# Flag for conversion to negative
neg_mult = True
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['removed-wrapping'])
# Try again without wrapping
converted_value = auto_convert_cell(flagable, mod_cell_str, position,
worksheet, flags, units)
neg_mult = neg_mult and check_cell_type(converted_value, get_cell_type(0))
if neg_mult and parens_as_neg:
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['converted-wrapping-to-neg'])
return -converted_value if neg_mult else converted_value
# Is a string containing numbers?
elif re.search(allregex.contains_numerical_regex, cell_str):
conversion = auto_convert_numeric_string_cell(flagable, conversion, position,
worksheet, flags, units)
elif re.search(allregex.bool_regex, cell_str):
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['bool-to-int'])
conversion = 1 if re.search(allregex.true_bool_regex, cell_str) else 0
return conversion
|
python
|
def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags,
units, parens_as_neg=True):
'''
Handles the string case of cell and attempts auto-conversion
for auto_convert_cell.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
'''
conversion = cell_str.strip()
# Wrapped?
if re.search(allregex.control_wrapping_regex, cell_str):
# Drop the wrapping characters
stripped_cell = cell_str.strip()
mod_cell_str = stripped_cell[1:][:-1].strip()
neg_mult = False
# If the wrapping characters are '(' and ')' and the interior is a number,
# then the number should be interpreted as a negative value
if (stripped_cell[0] == '(' and stripped_cell[-1] == ')' and
re.search(allregex.contains_numerical_regex, mod_cell_str)):
# Flag for conversion to negative
neg_mult = True
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['removed-wrapping'])
# Try again without wrapping
converted_value = auto_convert_cell(flagable, mod_cell_str, position,
worksheet, flags, units)
neg_mult = neg_mult and check_cell_type(converted_value, get_cell_type(0))
if neg_mult and parens_as_neg:
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['converted-wrapping-to-neg'])
return -converted_value if neg_mult else converted_value
# Is a string containing numbers?
elif re.search(allregex.contains_numerical_regex, cell_str):
conversion = auto_convert_numeric_string_cell(flagable, conversion, position,
worksheet, flags, units)
elif re.search(allregex.bool_regex, cell_str):
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['bool-to-int'])
conversion = 1 if re.search(allregex.true_bool_regex, cell_str) else 0
return conversion
|
[
"def",
"auto_convert_string_cell",
"(",
"flagable",
",",
"cell_str",
",",
"position",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"parens_as_neg",
"=",
"True",
")",
":",
"conversion",
"=",
"cell_str",
".",
"strip",
"(",
")",
"# Wrapped?",
"if",
"re",
".",
"search",
"(",
"allregex",
".",
"control_wrapping_regex",
",",
"cell_str",
")",
":",
"# Drop the wrapping characters",
"stripped_cell",
"=",
"cell_str",
".",
"strip",
"(",
")",
"mod_cell_str",
"=",
"stripped_cell",
"[",
"1",
":",
"]",
"[",
":",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"neg_mult",
"=",
"False",
"# If the wrapping characters are '(' and ')' and the interior is a number,",
"# then the number should be interpreted as a negative value",
"if",
"(",
"stripped_cell",
"[",
"0",
"]",
"==",
"'('",
"and",
"stripped_cell",
"[",
"-",
"1",
"]",
"==",
"')'",
"and",
"re",
".",
"search",
"(",
"allregex",
".",
"contains_numerical_regex",
",",
"mod_cell_str",
")",
")",
":",
"# Flag for conversion to negative",
"neg_mult",
"=",
"True",
"flagable",
".",
"flag_change",
"(",
"flags",
",",
"'interpreted'",
",",
"position",
",",
"worksheet",
",",
"flagable",
".",
"FLAGS",
"[",
"'removed-wrapping'",
"]",
")",
"# Try again without wrapping",
"converted_value",
"=",
"auto_convert_cell",
"(",
"flagable",
",",
"mod_cell_str",
",",
"position",
",",
"worksheet",
",",
"flags",
",",
"units",
")",
"neg_mult",
"=",
"neg_mult",
"and",
"check_cell_type",
"(",
"converted_value",
",",
"get_cell_type",
"(",
"0",
")",
")",
"if",
"neg_mult",
"and",
"parens_as_neg",
":",
"flagable",
".",
"flag_change",
"(",
"flags",
",",
"'interpreted'",
",",
"position",
",",
"worksheet",
",",
"flagable",
".",
"FLAGS",
"[",
"'converted-wrapping-to-neg'",
"]",
")",
"return",
"-",
"converted_value",
"if",
"neg_mult",
"else",
"converted_value",
"# Is a string containing numbers?",
"elif",
"re",
".",
"search",
"(",
"allregex",
".",
"contains_numerical_regex",
",",
"cell_str",
")",
":",
"conversion",
"=",
"auto_convert_numeric_string_cell",
"(",
"flagable",
",",
"conversion",
",",
"position",
",",
"worksheet",
",",
"flags",
",",
"units",
")",
"elif",
"re",
".",
"search",
"(",
"allregex",
".",
"bool_regex",
",",
"cell_str",
")",
":",
"flagable",
".",
"flag_change",
"(",
"flags",
",",
"'interpreted'",
",",
"position",
",",
"worksheet",
",",
"flagable",
".",
"FLAGS",
"[",
"'bool-to-int'",
"]",
")",
"conversion",
"=",
"1",
"if",
"re",
".",
"search",
"(",
"allregex",
".",
"true_bool_regex",
",",
"cell_str",
")",
"else",
"0",
"return",
"conversion"
] |
Handles the string case of cell and attempts auto-conversion
for auto_convert_cell.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
|
[
"Handles",
"the",
"string",
"case",
"of",
"cell",
"and",
"attempts",
"auto",
"-",
"conversion",
"for",
"auto_convert_cell",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/cellanalyzer.py#L109-L151
|
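The accounting-style parens-as-negative rule is the subtlest branch above, so a worked example helps. This is a simplified, dependency-free sketch (parens_to_number is an invented name; the real function also routes through flags, units, and the allregex patterns):

import re

_NUM = re.compile(r'-?\d+(?:\.\d+)?$')    # a full-string number, e.g. '42' or '3.5'

def parens_to_number(cell_str):
    s = cell_str.strip()
    inner = s[1:-1].strip()
    if s.startswith('(') and s.endswith(')') and _NUM.match(inner):
        return -float(inner)              # '(42)' -> -42.0, the accounting style
    return float(s) if _NUM.match(s) else s

parens_to_number('(42)')   # -42.0
parens_to_number('42')     # 42.0
parens_to_number('n/a')    # 'n/a'  - left alone, nothing numeric to convert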