id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
238,500
|
CenterForOpenScience/sharepa
|
sharepa/analysis.py
|
bucket_to_dataframe
|
def bucket_to_dataframe(name, buckets, append_name=None):
    '''Turn elasticsearch aggregation buckets into a pandas DataFrame.

    :param name: column name that will hold each bucket's ``doc_count``
    :type name: str
    :param buckets: buckets from elasticsearch results (plain dicts, or
        objects exposing ``to_dict()``)
    :type buckets: list[dict]
    :param append_name: optional prefix; when given, every column is renamed
        to ``append_name + '.' + column``
    :returns: pandas.DataFrame -- one row per bucket
    '''
    expanded_buckets = []
    for item in buckets:
        # Copy plain dicts so we never mutate the caller's data (the
        # original popped 'doc_count' straight out of the input dicts).
        row = dict(item) if isinstance(item, dict) else item.to_dict()
        row[name] = row.pop('doc_count')
        if append_name:
            # Build the prefixed dict in one pass instead of popping keys
            # while iterating over a copy.
            row = {append_name + '.' + key: value for key, value in row.items()}
        expanded_buckets.append(row)
    return pd.DataFrame(expanded_buckets)
|
python
|
def bucket_to_dataframe(name, buckets, append_name=None):
    '''Turn elasticsearch aggregation buckets into a pandas DataFrame.

    :param name: column name that will hold each bucket's ``doc_count``
    :type name: str
    :param buckets: buckets from elasticsearch results (plain dicts, or
        objects exposing ``to_dict()``)
    :type buckets: list[dict]
    :param append_name: optional prefix; when given, every column is renamed
        to ``append_name + '.' + column``
    :returns: pandas.DataFrame -- one row per bucket
    '''
    expanded_buckets = []
    for item in buckets:
        # Copy plain dicts so we never mutate the caller's data (the
        # original popped 'doc_count' straight out of the input dicts).
        row = dict(item) if isinstance(item, dict) else item.to_dict()
        row[name] = row.pop('doc_count')
        if append_name:
            # Build the prefixed dict in one pass instead of popping keys
            # while iterating over a copy.
            row = {append_name + '.' + key: value for key, value in row.items()}
        expanded_buckets.append(row)
    return pd.DataFrame(expanded_buckets)
|
[
"def",
"bucket_to_dataframe",
"(",
"name",
",",
"buckets",
",",
"append_name",
"=",
"None",
")",
":",
"expanded_buckets",
"=",
"[",
"]",
"for",
"item",
"in",
"buckets",
":",
"if",
"type",
"(",
"item",
")",
"is",
"dict",
":",
"single_dict",
"=",
"item",
"else",
":",
"single_dict",
"=",
"item",
".",
"to_dict",
"(",
")",
"single_dict",
"[",
"name",
"]",
"=",
"single_dict",
".",
"pop",
"(",
"'doc_count'",
")",
"if",
"append_name",
":",
"persistance_dict",
"=",
"single_dict",
".",
"copy",
"(",
")",
"for",
"key",
"in",
"persistance_dict",
".",
"keys",
"(",
")",
":",
"single_dict",
"[",
"append_name",
"+",
"'.'",
"+",
"key",
"]",
"=",
"single_dict",
".",
"pop",
"(",
"key",
")",
"expanded_buckets",
".",
"append",
"(",
"single_dict",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"expanded_buckets",
")"
] |
A function that turns elasticsearch aggregation buckets into dataframes
:param name: The name of the bucket (will be a column in the dataframe)
:type name: str
:param bucket: a bucket from elasticsearch results
:type bucket: list[dict]
:returns: pandas.DataFrame
|
[
"A",
"function",
"that",
"turns",
"elasticsearch",
"aggregation",
"buckets",
"into",
"dataframes"
] |
5ea69b160080f0b9c655012f17fabaa1bcc02ae0
|
https://github.com/CenterForOpenScience/sharepa/blob/5ea69b160080f0b9c655012f17fabaa1bcc02ae0/sharepa/analysis.py#L4-L25
|
238,501
|
CenterForOpenScience/sharepa
|
sharepa/analysis.py
|
agg_to_two_dim_dataframe
|
def agg_to_two_dim_dataframe(agg):
    '''Turn an elasticsearch aggregation into a one- or two-dimensional DataFrame.

    :param agg: an aggregation from elasticsearch results
    :type agg: elasticsearch response.aggregation.agg_name object
    :returns: pandas.DataFrame of one or two dimensions depending on input data
    :raises ValueError: if a bucket contains more than one nested sub-aggregation
    '''
    expanded_agg = []
    for bucket in agg.buckets:
        bucket_as_dict = bucket.to_dict()
        if dict not in [type(item) for item in bucket_as_dict.values()]:
            # Flat bucket: no sub-aggregation, so the whole result is
            # one-dimensional.
            return bucket_to_dataframe('doc_count', agg.buckets)
        lower_level_dict = [key for key in bucket_as_dict if type(bucket_as_dict[key]) is dict]
        if len(lower_level_dict) > 1:
            raise ValueError('Two dimensional data can only convert a 2 level aggregation (with 1 aggregation at each level)')
        name_of_lower_level = lower_level_dict[0]
        single_level_dataframe = bucket_to_dataframe(bucket.key,
                                                     bucket[name_of_lower_level]['buckets'],
                                                     name_of_lower_level)
        expanded_agg.append(single_level_dataframe)
    merged_results = merge_dataframes(*expanded_agg)
    # Rearrange so the 'key' columns come first. Select positionally with
    # .iloc: indexing a DataFrame with a plain list of ints would look the
    # ints up as column *labels* and raise KeyError for string columns.
    cols = merged_results.columns.tolist()
    indices_of_keys = [i for i, s in enumerate(cols) if 'key' in s]
    all_other_cols = [i for i in range(len(cols)) if i not in indices_of_keys]
    new_col_order = indices_of_keys + all_other_cols
    return merged_results.iloc[:, new_col_order]
|
python
|
def agg_to_two_dim_dataframe(agg):
    '''Turn an elasticsearch aggregation into a one- or two-dimensional DataFrame.

    :param agg: an aggregation from elasticsearch results
    :type agg: elasticsearch response.aggregation.agg_name object
    :returns: pandas.DataFrame of one or two dimensions depending on input data
    :raises ValueError: if a bucket contains more than one nested sub-aggregation
    '''
    expanded_agg = []
    for bucket in agg.buckets:
        bucket_as_dict = bucket.to_dict()
        if dict not in [type(item) for item in bucket_as_dict.values()]:
            # Flat bucket: no sub-aggregation, so the whole result is
            # one-dimensional.
            return bucket_to_dataframe('doc_count', agg.buckets)
        lower_level_dict = [key for key in bucket_as_dict if type(bucket_as_dict[key]) is dict]
        if len(lower_level_dict) > 1:
            raise ValueError('Two dimensional data can only convert a 2 level aggregation (with 1 aggregation at each level)')
        name_of_lower_level = lower_level_dict[0]
        single_level_dataframe = bucket_to_dataframe(bucket.key,
                                                     bucket[name_of_lower_level]['buckets'],
                                                     name_of_lower_level)
        expanded_agg.append(single_level_dataframe)
    merged_results = merge_dataframes(*expanded_agg)
    # Rearrange so the 'key' columns come first. Select positionally with
    # .iloc: indexing a DataFrame with a plain list of ints would look the
    # ints up as column *labels* and raise KeyError for string columns.
    cols = merged_results.columns.tolist()
    indices_of_keys = [i for i, s in enumerate(cols) if 'key' in s]
    all_other_cols = [i for i in range(len(cols)) if i not in indices_of_keys]
    new_col_order = indices_of_keys + all_other_cols
    return merged_results.iloc[:, new_col_order]
|
[
"def",
"agg_to_two_dim_dataframe",
"(",
"agg",
")",
":",
"expanded_agg",
"=",
"[",
"]",
"for",
"bucket",
"in",
"agg",
".",
"buckets",
":",
"bucket_as_dict",
"=",
"bucket",
".",
"to_dict",
"(",
")",
"if",
"dict",
"not",
"in",
"[",
"type",
"(",
"item",
")",
"for",
"item",
"in",
"bucket_as_dict",
".",
"values",
"(",
")",
"]",
":",
"return",
"bucket_to_dataframe",
"(",
"'doc_count'",
",",
"agg",
".",
"buckets",
")",
"else",
":",
"lower_level_dict",
"=",
"[",
"item",
"for",
"item",
"in",
"bucket_as_dict",
".",
"keys",
"(",
")",
"if",
"type",
"(",
"bucket_as_dict",
"[",
"item",
"]",
")",
"is",
"dict",
"]",
"if",
"len",
"(",
"lower_level_dict",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Two dimensional data can only convert a 2 level aggregation (with 1 aggregation at each level)'",
")",
"name_of_lower_level",
"=",
"lower_level_dict",
"[",
"0",
"]",
"single_level_dataframe",
"=",
"bucket_to_dataframe",
"(",
"bucket",
".",
"key",
",",
"bucket",
"[",
"name_of_lower_level",
"]",
"[",
"'buckets'",
"]",
",",
"name_of_lower_level",
")",
"expanded_agg",
".",
"append",
"(",
"single_level_dataframe",
")",
"merged_results",
"=",
"merge_dataframes",
"(",
"*",
"expanded_agg",
")",
"# rearrange to get key as first col",
"cols",
"=",
"merged_results",
".",
"columns",
".",
"tolist",
"(",
")",
"indices_of_keys",
"=",
"[",
"i",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"cols",
")",
"if",
"'key'",
"in",
"s",
"]",
"all_other_cols",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"cols",
")",
")",
"if",
"i",
"not",
"in",
"indices_of_keys",
"]",
"new_col_order",
"=",
"indices_of_keys",
"+",
"all_other_cols",
"return",
"merged_results",
"[",
"new_col_order",
"]"
] |
A function that takes an elasticsearch response with aggregation and returns the names of all bucket value pairs
:param agg: an aggregation from elasticsearch results
:type agg: elasticsearch response.aggregation.agg_name object
:returns: pandas data frame of one or two dimensions depending on input data
|
[
"A",
"function",
"that",
"takes",
"an",
"elasticsearch",
"response",
"with",
"aggregation",
"and",
"returns",
"the",
"names",
"of",
"all",
"bucket",
"value",
"pairs"
] |
5ea69b160080f0b9c655012f17fabaa1bcc02ae0
|
https://github.com/CenterForOpenScience/sharepa/blob/5ea69b160080f0b9c655012f17fabaa1bcc02ae0/sharepa/analysis.py#L28-L55
|
238,502
|
CenterForOpenScience/sharepa
|
sharepa/analysis.py
|
merge_dataframes
|
def merge_dataframes(*dfs):
    '''Merge dataframes that share the same indices; duplicate columns are removed.

    :param dfs: dataframes to be merged (note: they must have the same indices)
    :type dfs: list[pandas.DataFrame]
    :returns: pandas.DataFrame -- a merged dataframe
    '''
    # pd.concat's join_axes argument was removed in pandas 1.0; aligning on
    # the first dataframe's index with .reindex is the supported equivalent.
    merged_dataframe = pd.concat(dfs, axis=1).reindex(dfs[0].index)
    # Transposing turns columns into rows so drop_duplicates can remove
    # columns whose values are identical.
    return merged_dataframe.transpose().drop_duplicates().transpose()
|
python
|
def merge_dataframes(*dfs):
    '''Merge dataframes that share the same indices; duplicate columns are removed.

    :param dfs: dataframes to be merged (note: they must have the same indices)
    :type dfs: list[pandas.DataFrame]
    :returns: pandas.DataFrame -- a merged dataframe
    '''
    # pd.concat's join_axes argument was removed in pandas 1.0; aligning on
    # the first dataframe's index with .reindex is the supported equivalent.
    merged_dataframe = pd.concat(dfs, axis=1).reindex(dfs[0].index)
    # Transposing turns columns into rows so drop_duplicates can remove
    # columns whose values are identical.
    return merged_dataframe.transpose().drop_duplicates().transpose()
|
[
"def",
"merge_dataframes",
"(",
"*",
"dfs",
")",
":",
"merged_dataframe",
"=",
"pd",
".",
"concat",
"(",
"dfs",
",",
"axis",
"=",
"1",
",",
"join_axes",
"=",
"[",
"dfs",
"[",
"0",
"]",
".",
"index",
"]",
")",
"return",
"merged_dataframe",
".",
"transpose",
"(",
")",
".",
"drop_duplicates",
"(",
")",
".",
"transpose",
"(",
")"
] |
A helper function for merging two dataframes that have the same indices, duplicate columns are removed
:param dfs: a list of dataframes to be merged (note: they must have the same indices)
:type dfs: list[pandas.DataFrame]
:returns: pandas.DataFrame -- a merged dataframe
|
[
"A",
"helper",
"function",
"for",
"merging",
"two",
"dataframes",
"that",
"have",
"the",
"same",
"indices",
"duplicate",
"columns",
"are",
"removed"
] |
5ea69b160080f0b9c655012f17fabaa1bcc02ae0
|
https://github.com/CenterForOpenScience/sharepa/blob/5ea69b160080f0b9c655012f17fabaa1bcc02ae0/sharepa/analysis.py#L58-L66
|
238,503
|
etcher-be/elib_miz
|
elib_miz/miz.py
|
Miz.decode
|
def decode(self):
    """Decode the mission's lua tables into Python dictionaries."""
    LOGGER.debug('decoding lua tables')
    if not self.zip_content:
        self.unzip()

    def _read_lua_table(path):
        # Parse one extracted lua file with SLTP, returning (data, qualifier).
        with open(str(path), encoding=ENCODING) as stream:
            return SLTP().decode(stream.read())

    LOGGER.debug('reading map resource file')
    self._map_res, self._map_res_qual = _read_lua_table(self.map_res_file)
    LOGGER.debug('reading l10n file')
    self._l10n, self._l10n_qual = _read_lua_table(self.dictionary_file)
    LOGGER.debug('reading mission file')
    mission_data, self._mission_qual = _read_lua_table(self.mission_file)
    self._mission = Mission(mission_data, self._l10n)

    LOGGER.debug('gathering resources')
    for resource_file in Path(self.temp_dir, 'l10n', 'DEFAULT').iterdir():
        # Everything in the DEFAULT l10n folder except the two lua tables
        # is a mission resource (sound files, images, ...).
        if resource_file.name not in ('dictionary', 'mapResource'):
            LOGGER.debug('found resource: %s', resource_file.name)
            self._resources.add(resource_file.name)
    LOGGER.debug('decoding done')
|
python
|
def decode(self):
    """Decode the mission's lua tables into Python dictionaries."""
    LOGGER.debug('decoding lua tables')
    if not self.zip_content:
        self.unzip()

    def _read_lua_table(path):
        # Parse one extracted lua file with SLTP, returning (data, qualifier).
        with open(str(path), encoding=ENCODING) as stream:
            return SLTP().decode(stream.read())

    LOGGER.debug('reading map resource file')
    self._map_res, self._map_res_qual = _read_lua_table(self.map_res_file)
    LOGGER.debug('reading l10n file')
    self._l10n, self._l10n_qual = _read_lua_table(self.dictionary_file)
    LOGGER.debug('reading mission file')
    mission_data, self._mission_qual = _read_lua_table(self.mission_file)
    self._mission = Mission(mission_data, self._l10n)

    LOGGER.debug('gathering resources')
    for resource_file in Path(self.temp_dir, 'l10n', 'DEFAULT').iterdir():
        # Everything in the DEFAULT l10n folder except the two lua tables
        # is a mission resource (sound files, images, ...).
        if resource_file.name not in ('dictionary', 'mapResource'):
            LOGGER.debug('found resource: %s', resource_file.name)
            self._resources.add(resource_file.name)
    LOGGER.debug('decoding done')
|
[
"def",
"decode",
"(",
"self",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'decoding lua tables'",
")",
"if",
"not",
"self",
".",
"zip_content",
":",
"self",
".",
"unzip",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"'reading map resource file'",
")",
"with",
"open",
"(",
"str",
"(",
"self",
".",
"map_res_file",
")",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"stream",
":",
"self",
".",
"_map_res",
",",
"self",
".",
"_map_res_qual",
"=",
"SLTP",
"(",
")",
".",
"decode",
"(",
"stream",
".",
"read",
"(",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'reading l10n file'",
")",
"with",
"open",
"(",
"str",
"(",
"self",
".",
"dictionary_file",
")",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"stream",
":",
"self",
".",
"_l10n",
",",
"self",
".",
"_l10n_qual",
"=",
"SLTP",
"(",
")",
".",
"decode",
"(",
"stream",
".",
"read",
"(",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'reading mission file'",
")",
"with",
"open",
"(",
"str",
"(",
"self",
".",
"mission_file",
")",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"stream",
":",
"mission_data",
",",
"self",
".",
"_mission_qual",
"=",
"SLTP",
"(",
")",
".",
"decode",
"(",
"stream",
".",
"read",
"(",
")",
")",
"self",
".",
"_mission",
"=",
"Mission",
"(",
"mission_data",
",",
"self",
".",
"_l10n",
")",
"LOGGER",
".",
"debug",
"(",
"'gathering resources'",
")",
"for",
"file",
"in",
"Path",
"(",
"self",
".",
"temp_dir",
",",
"'l10n'",
",",
"'DEFAULT'",
")",
".",
"iterdir",
"(",
")",
":",
"if",
"file",
".",
"name",
"in",
"(",
"'dictionary'",
",",
"'mapResource'",
")",
":",
"continue",
"LOGGER",
".",
"debug",
"(",
"'found resource: %s'",
",",
"file",
".",
"name",
")",
"self",
".",
"_resources",
".",
"add",
"(",
"file",
".",
"name",
")",
"LOGGER",
".",
"debug",
"(",
"'decoding done'",
")"
] |
Decodes the mission files into dictionaries
|
[
"Decodes",
"the",
"mission",
"files",
"into",
"dictionaries"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/miz.py#L245-L273
|
238,504
|
etcher-be/elib_miz
|
elib_miz/miz.py
|
Miz.unzip
|
def unzip(self, overwrite: bool = False):
    """
    Flattens a MIZ file into the temp dir.

    Args:
        overwrite: allow overwriting existing files
    """
    # Refuse to clobber a previous extraction unless explicitly allowed.
    if self.zip_content and not overwrite:
        raise FileExistsError(str(self.temp_dir))

    LOGGER.debug('unzipping miz to temp dir')
    try:
        with ZipFile(str(self.miz_path)) as zip_file:
            LOGGER.debug('reading infolist')
            self.zip_content = [info.filename for info in zip_file.infolist()]
            self._extract_files_from_zip(zip_file)
    except BadZipFile:
        # Re-raise with the offending path as the message.
        raise BadZipFile(str(self.miz_path))
    except:  # noqa: E722
        # Deliberate catch-all: log the unexpected failure, then propagate.
        LOGGER.exception('error while unzipping miz file: %s', self.miz_path)
        raise

    LOGGER.debug('checking miz content')
    required_items = ('mission', 'options', 'warehouses',
                      'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource')
    # noinspection PyTypeChecker
    for miz_item in required_items:
        if not Path(self.temp_dir.joinpath(miz_item)).exists():
            LOGGER.error('missing file in miz: %s', miz_item)
            raise FileNotFoundError(miz_item)
    self._check_extracted_content()
    LOGGER.debug('all files have been found, miz successfully unzipped')
|
python
|
def unzip(self, overwrite: bool = False):
    """
    Flattens a MIZ file into the temp dir.

    Args:
        overwrite: allow overwriting existing files
    """
    # Refuse to clobber a previous extraction unless explicitly allowed.
    if self.zip_content and not overwrite:
        raise FileExistsError(str(self.temp_dir))

    LOGGER.debug('unzipping miz to temp dir')
    try:
        with ZipFile(str(self.miz_path)) as zip_file:
            LOGGER.debug('reading infolist')
            self.zip_content = [info.filename for info in zip_file.infolist()]
            self._extract_files_from_zip(zip_file)
    except BadZipFile:
        # Re-raise with the offending path as the message.
        raise BadZipFile(str(self.miz_path))
    except:  # noqa: E722
        # Deliberate catch-all: log the unexpected failure, then propagate.
        LOGGER.exception('error while unzipping miz file: %s', self.miz_path)
        raise

    LOGGER.debug('checking miz content')
    required_items = ('mission', 'options', 'warehouses',
                      'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource')
    # noinspection PyTypeChecker
    for miz_item in required_items:
        if not Path(self.temp_dir.joinpath(miz_item)).exists():
            LOGGER.error('missing file in miz: %s', miz_item)
            raise FileNotFoundError(miz_item)
    self._check_extracted_content()
    LOGGER.debug('all files have been found, miz successfully unzipped')
|
[
"def",
"unzip",
"(",
"self",
",",
"overwrite",
":",
"bool",
"=",
"False",
")",
":",
"if",
"self",
".",
"zip_content",
"and",
"not",
"overwrite",
":",
"raise",
"FileExistsError",
"(",
"str",
"(",
"self",
".",
"temp_dir",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'unzipping miz to temp dir'",
")",
"try",
":",
"with",
"ZipFile",
"(",
"str",
"(",
"self",
".",
"miz_path",
")",
")",
"as",
"zip_file",
":",
"LOGGER",
".",
"debug",
"(",
"'reading infolist'",
")",
"self",
".",
"zip_content",
"=",
"[",
"f",
".",
"filename",
"for",
"f",
"in",
"zip_file",
".",
"infolist",
"(",
")",
"]",
"self",
".",
"_extract_files_from_zip",
"(",
"zip_file",
")",
"except",
"BadZipFile",
":",
"raise",
"BadZipFile",
"(",
"str",
"(",
"self",
".",
"miz_path",
")",
")",
"except",
":",
"# noqa: E722",
"LOGGER",
".",
"exception",
"(",
"'error while unzipping miz file: %s'",
",",
"self",
".",
"miz_path",
")",
"raise",
"LOGGER",
".",
"debug",
"(",
"'checking miz content'",
")",
"# noinspection PyTypeChecker",
"for",
"miz_item",
"in",
"[",
"'mission'",
",",
"'options'",
",",
"'warehouses'",
",",
"'l10n/DEFAULT/dictionary'",
",",
"'l10n/DEFAULT/mapResource'",
"]",
":",
"if",
"not",
"Path",
"(",
"self",
".",
"temp_dir",
".",
"joinpath",
"(",
"miz_item",
")",
")",
".",
"exists",
"(",
")",
":",
"LOGGER",
".",
"error",
"(",
"'missing file in miz: %s'",
",",
"miz_item",
")",
"raise",
"FileNotFoundError",
"(",
"miz_item",
")",
"self",
".",
"_check_extracted_content",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"'all files have been found, miz successfully unzipped'",
")"
] |
Flattens a MIZ file into the temp dir
Args:
overwrite: allow overwriting existing files
|
[
"Flattens",
"a",
"MIZ",
"file",
"into",
"the",
"temp",
"dir"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/miz.py#L315-L356
|
238,505
|
luismsgomes/stringology
|
src/stringology/lis.py
|
lis
|
def lis(seq, indices=False):
    '''longest increasing subsequence
    >>> lis([1, 2, 5, 3, 4])
    [1, 2, 3, 4]
    '''
    if not seq:
        return []
    # predecessor[j] is the index of the element that precedes j in the best
    # increasing subsequence ending at j (None if j starts the subsequence)
    predecessor = [None] * len(seq)
    # smallest_tail[L] is the (value, index) pair of the smallest tail value
    # among all increasing subsequences of length L + 1
    smallest_tail = [(seq[0], 0)]
    for pos in range(1, len(seq)):
        value = seq[pos]
        if value > smallest_tail[-1][0]:
            # value extends the longest subsequence found so far
            predecessor[pos] = smallest_tail[-1][1]
            smallest_tail.append((value, pos))
        else:
            # find the shortest subsequence whose tail we can lower
            slot = bisect(smallest_tail, (value, -1))
            if smallest_tail[slot][0] > value:
                smallest_tail[slot] = (value, pos)
                if slot > 0:
                    predecessor[pos] = smallest_tail[slot - 1][1]
    # walk the predecessor chain back from the overall best tail
    result = []
    cursor = smallest_tail[-1][1]
    while cursor is not None:
        result.append(cursor if indices else seq[cursor])
        cursor = predecessor[cursor]
    result.reverse()
    return result
|
python
|
def lis(seq, indices=False):
    '''longest increasing subsequence
    >>> lis([1, 2, 5, 3, 4])
    [1, 2, 3, 4]
    '''
    if not seq:
        return []
    # predecessor[j] is the index of the element that precedes j in the best
    # increasing subsequence ending at j (None if j starts the subsequence)
    predecessor = [None] * len(seq)
    # smallest_tail[L] is the (value, index) pair of the smallest tail value
    # among all increasing subsequences of length L + 1
    smallest_tail = [(seq[0], 0)]
    for pos in range(1, len(seq)):
        value = seq[pos]
        if value > smallest_tail[-1][0]:
            # value extends the longest subsequence found so far
            predecessor[pos] = smallest_tail[-1][1]
            smallest_tail.append((value, pos))
        else:
            # find the shortest subsequence whose tail we can lower
            slot = bisect(smallest_tail, (value, -1))
            if smallest_tail[slot][0] > value:
                smallest_tail[slot] = (value, pos)
                if slot > 0:
                    predecessor[pos] = smallest_tail[slot - 1][1]
    # walk the predecessor chain back from the overall best tail
    result = []
    cursor = smallest_tail[-1][1]
    while cursor is not None:
        result.append(cursor if indices else seq[cursor])
        cursor = predecessor[cursor]
    result.reverse()
    return result
|
[
"def",
"lis",
"(",
"seq",
",",
"indices",
"=",
"False",
")",
":",
"if",
"not",
"seq",
":",
"return",
"[",
"]",
"# prevs[i] is the index of the previous element in the longest subsequence",
"# containing element i",
"prevs",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"seq",
")",
"# tails[i] is the pair (elem, index) of the lowest element of any",
"# subsequence with length i + 1",
"tails",
"=",
"[",
"(",
"seq",
"[",
"0",
"]",
",",
"0",
")",
"]",
"for",
"i",
",",
"elem",
"in",
"enumerate",
"(",
"seq",
"[",
"1",
":",
"]",
",",
"start",
"=",
"1",
")",
":",
"if",
"elem",
">",
"tails",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
":",
"prevs",
"[",
"i",
"]",
"=",
"tails",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"tails",
".",
"append",
"(",
"(",
"elem",
",",
"i",
")",
")",
"continue",
"# let's find a tail that we can extend",
"k",
"=",
"bisect",
"(",
"tails",
",",
"(",
"elem",
",",
"-",
"1",
")",
")",
"if",
"tails",
"[",
"k",
"]",
"[",
"0",
"]",
">",
"elem",
":",
"tails",
"[",
"k",
"]",
"=",
"(",
"elem",
",",
"i",
")",
"if",
"k",
">",
"0",
":",
"prevs",
"[",
"i",
"]",
"=",
"tails",
"[",
"k",
"-",
"1",
"]",
"[",
"1",
"]",
"_",
",",
"i",
"=",
"tails",
"[",
"-",
"1",
"]",
"subseq",
"=",
"[",
"]",
"while",
"i",
"is",
"not",
"None",
":",
"subseq",
".",
"append",
"(",
"i",
"if",
"indices",
"else",
"seq",
"[",
"i",
"]",
")",
"i",
"=",
"prevs",
"[",
"i",
"]",
"return",
"subseq",
"[",
":",
":",
"-",
"1",
"]"
] |
longest increasing subsequence
>>> lis([1, 2, 5, 3, 4])
[1, 2, 3, 4]
|
[
"longest",
"increasing",
"subsequence"
] |
c627dc5a0d4c6af10946040a6463d5495d39d960
|
https://github.com/luismsgomes/stringology/blob/c627dc5a0d4c6af10946040a6463d5495d39d960/src/stringology/lis.py#L4-L34
|
238,506
|
roboogle/gtkmvc3
|
gtkmvco/gtkmvc3/support/factories.py
|
ModelFactory.__fix_bases
|
def __fix_bases(base_classes, have_mt):
    """Check whether base_classes contains a Model subclass; if not, insert
    the best fitting model class. Also appends a metaclass-resolving wrapper
    class and sorts the result into a canonical ordering so that it can be
    used as a memoization key.

    :param base_classes: iterable of base classes for the model being built
    :param have_mt: when True, insert the multi-threading aware ModelMT
    :returns: tuple of classes in canonical order
    """
    fixed = list(base_classes)
    # BUG FIX: the original tested isinstance(fixed, Model) -- the *list*
    # itself -- which is always False, so a Model base was never detected.
    # base_classes holds classes, so subclass-check each element instead.
    contains_model = any(issubclass(b, Model) for b in fixed)
    # adds a model when user is lazy
    if not contains_model:
        if have_mt:
            from gtkmvc3.model_mt import ModelMT
            fixed.insert(0, ModelMT)
        else:
            fixed.insert(0, Model)

    class ModelFactoryWrap (object):
        # Resolve any metaclass conflict among the chosen base classes.
        __metaclass__ = get_noconflict_metaclass(tuple(fixed), (), ())
        def __init__(self, *args, **kwargs):
            pass

    fixed.append(ModelFactoryWrap)
    # NOTE(review): sorting class objects relies on Python 2's arbitrary
    # (but consistent) inter-type ordering; it would raise on Python 3.
    fixed.sort()
    return tuple(fixed)
|
python
|
def __fix_bases(base_classes, have_mt):
    """Check whether base_classes contains a Model subclass; if not, insert
    the best fitting model class. Also appends a metaclass-resolving wrapper
    class and sorts the result into a canonical ordering so that it can be
    used as a memoization key.

    :param base_classes: iterable of base classes for the model being built
    :param have_mt: when True, insert the multi-threading aware ModelMT
    :returns: tuple of classes in canonical order
    """
    fixed = list(base_classes)
    # BUG FIX: the original tested isinstance(fixed, Model) -- the *list*
    # itself -- which is always False, so a Model base was never detected.
    # base_classes holds classes, so subclass-check each element instead.
    contains_model = any(issubclass(b, Model) for b in fixed)
    # adds a model when user is lazy
    if not contains_model:
        if have_mt:
            from gtkmvc3.model_mt import ModelMT
            fixed.insert(0, ModelMT)
        else:
            fixed.insert(0, Model)

    class ModelFactoryWrap (object):
        # Resolve any metaclass conflict among the chosen base classes.
        __metaclass__ = get_noconflict_metaclass(tuple(fixed), (), ())
        def __init__(self, *args, **kwargs):
            pass

    fixed.append(ModelFactoryWrap)
    # NOTE(review): sorting class objects relies on Python 2's arbitrary
    # (but consistent) inter-type ordering; it would raise on Python 3.
    fixed.sort()
    return tuple(fixed)
|
[
"def",
"__fix_bases",
"(",
"base_classes",
",",
"have_mt",
")",
":",
"fixed",
"=",
"list",
"(",
"base_classes",
")",
"contains_model",
"=",
"False",
"for",
"b",
"in",
"fixed",
":",
"if",
"isinstance",
"(",
"fixed",
",",
"Model",
")",
":",
"contains_model",
"=",
"True",
"break",
"pass",
"# adds a model when user is lazy",
"if",
"not",
"contains_model",
":",
"if",
"have_mt",
":",
"from",
"gtkmvc3",
".",
"model_mt",
"import",
"ModelMT",
"fixed",
".",
"insert",
"(",
"0",
",",
"ModelMT",
")",
"else",
":",
"fixed",
".",
"insert",
"(",
"0",
",",
"Model",
")",
"pass",
"class",
"ModelFactoryWrap",
"(",
"object",
")",
":",
"__metaclass__",
"=",
"get_noconflict_metaclass",
"(",
"tuple",
"(",
"fixed",
")",
",",
"(",
")",
",",
"(",
")",
")",
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"pass",
"pass",
"fixed",
".",
"append",
"(",
"ModelFactoryWrap",
")",
"fixed",
".",
"sort",
"(",
")",
"return",
"tuple",
"(",
"fixed",
")"
] |
This function checks whether base_classes contains a Model
instance. If not, it chooses the best fitting class for
model. Furthermore, it makes the list in a canonical
ordering form in a way that it can be used as memoization
key
|
[
"This",
"function",
"check",
"whether",
"base_classes",
"contains",
"a",
"Model",
"instance",
".",
"If",
"not",
"choose",
"the",
"best",
"fitting",
"class",
"for",
"model",
".",
"Furthermore",
"it",
"makes",
"the",
"list",
"in",
"a",
"cannonical",
"ordering",
"form",
"in",
"a",
"way",
"that",
"ic",
"can",
"be",
"used",
"as",
"memoization",
"key"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/factories.py#L38-L65
|
238,507
|
roboogle/gtkmvc3
|
gtkmvco/gtkmvc3/support/factories.py
|
ModelFactory.make
|
def make(base_classes=(), have_mt=False):
    """Use this static method to build a model class that
    possibly derives from other classes. If have_mt is True,
    then returned class will take into account multi-threading
    issues when dealing with observable properties."""
    # Canonicalise the bases (inserting Model/ModelMT when absent) so the
    # resulting tuple can serve as a stable memoization key.
    good_bc = ModelFactory.__fix_bases(base_classes, have_mt)
    # NOTE(review): Python 2 print statement -- looks like leftover debug
    # output; confirm whether it should be removed or routed to logging.
    print "Base classes are:", good_bc
    # Memoize on the concatenation of the base classes' string reprs.
    key = "".join(map(str, good_bc))
    if key in ModelFactory.__memoized:
        return ModelFactory.__memoized[key]
    # new.classobj is Python 2 only (the 'new' module was removed in Python 3).
    cls = new.classobj('', good_bc, {'__module__': '__main__', '__doc__': None})
    ModelFactory.__memoized[key] = cls
    return cls
|
python
|
def make(base_classes=(), have_mt=False):
    """Use this static method to build a model class that
    possibly derives from other classes. If have_mt is True,
    then returned class will take into account multi-threading
    issues when dealing with observable properties."""
    # Canonicalise the bases (inserting Model/ModelMT when absent) so the
    # resulting tuple can serve as a stable memoization key.
    good_bc = ModelFactory.__fix_bases(base_classes, have_mt)
    # NOTE(review): Python 2 print statement -- looks like leftover debug
    # output; confirm whether it should be removed or routed to logging.
    print "Base classes are:", good_bc
    # Memoize on the concatenation of the base classes' string reprs.
    key = "".join(map(str, good_bc))
    if key in ModelFactory.__memoized:
        return ModelFactory.__memoized[key]
    # new.classobj is Python 2 only (the 'new' module was removed in Python 3).
    cls = new.classobj('', good_bc, {'__module__': '__main__', '__doc__': None})
    ModelFactory.__memoized[key] = cls
    return cls
|
[
"def",
"make",
"(",
"base_classes",
"=",
"(",
")",
",",
"have_mt",
"=",
"False",
")",
":",
"good_bc",
"=",
"ModelFactory",
".",
"__fix_bases",
"(",
"base_classes",
",",
"have_mt",
")",
"print",
"\"Base classes are:\"",
",",
"good_bc",
"key",
"=",
"\"\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"good_bc",
")",
")",
"if",
"key",
"in",
"ModelFactory",
".",
"__memoized",
":",
"return",
"ModelFactory",
".",
"__memoized",
"[",
"key",
"]",
"cls",
"=",
"new",
".",
"classobj",
"(",
"''",
",",
"good_bc",
",",
"{",
"'__module__'",
":",
"'__main__'",
",",
"'__doc__'",
":",
"None",
"}",
")",
"ModelFactory",
".",
"__memoized",
"[",
"key",
"]",
"=",
"cls",
"return",
"cls"
] |
Use this static method to build a model class that
possibly derives from other classes. If have_mt is True,
then returned class will take into account multi-threading
issues when dealing with observable properties.
|
[
"Use",
"this",
"static",
"method",
"to",
"build",
"a",
"model",
"class",
"that",
"possibly",
"derives",
"from",
"other",
"classes",
".",
"If",
"have_mt",
"is",
"True",
"then",
"returned",
"class",
"will",
"take",
"into",
"account",
"multi",
"-",
"threading",
"issues",
"when",
"dealing",
"with",
"observable",
"properties",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/factories.py#L68-L82
|
238,508
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py
|
_authentication
|
def _authentication(request_fun):
    """Decorator to handle autologin and authentication errors.
    *request_fun* is a function taking no arguments that needs to
    be run with this ``Context`` logged into Splunk.
    ``_authentication``'s behavior depends on whether the
    ``autologin`` field of ``Context`` is set to ``True`` or
    ``False``. If it's ``False``, then ``_authentication``
    aborts if the ``Context`` is not logged in, and raises an
    ``AuthenticationError`` if an ``HTTPError`` of status 401 is
    raised in *request_fun*. If it's ``True``, then
    ``_authentication`` will try at all sensible places to
    log in before issuing the request.
    If ``autologin`` is ``False``, ``_authentication`` makes
    one roundtrip to the server if the ``Context`` is logged in,
    or zero if it is not. If ``autologin`` is ``True``, it's less
    deterministic, and may make at most three roundtrips (though
    that would be a truly pathological case).
    :param request_fun: A function of no arguments encapsulating
        the request to make to the server.
    **Example**::
        import splunklib.binding as binding
        c = binding.connect(..., autologin=True)
        c.logout()
        def f():
            c.get("/services")
            return 42
        print _authentication(f)
    """
    @wraps(request_fun)
    def wrapper(self, *args, **kwargs):
        if self.token is _NoAuthenticationToken:
            # Not yet logged in.
            if self.autologin and self.username and self.password:
                # This will throw an uncaught
                # AuthenticationError if it fails.
                self.login()
            else:
                # Try the request anyway without authentication.
                # Most requests will fail. Some will succeed, such as
                # 'GET server/info'.
                # NOTE(review): _handle_auth_error presumably converts a 401
                # HTTPError into an AuthenticationError -- confirm at its def.
                with _handle_auth_error("Request aborted: not logged in."):
                    return request_fun(self, *args, **kwargs)
        try:
            # Issue the request
            return request_fun(self, *args, **kwargs)
        except HTTPError as he:
            if he.status == 401 and self.autologin:
                # Authentication failed. Try logging in, and then
                # rerunning the request. If either step fails, throw
                # an AuthenticationError and give up.
                with _handle_auth_error("Autologin failed."):
                    self.login()
                with _handle_auth_error(
                        "Autologin succeeded, but there was an auth error on "
                        "next request. Something is very wrong."):
                    return request_fun(self, *args, **kwargs)
            elif he.status == 401 and not self.autologin:
                raise AuthenticationError(
                    "Request failed: Session is not logged in.", he)
            else:
                # Non-auth HTTP errors propagate unchanged to the caller.
                raise
    # wraps() keeps request_fun's name/docstring on the returned wrapper.
    return wrapper
|
python
|
def _authentication(request_fun):
    """Decorator to handle autologin and authentication errors.
    *request_fun* is a function taking no arguments that needs to
    be run with this ``Context`` logged into Splunk.
    ``_authentication``'s behavior depends on whether the
    ``autologin`` field of ``Context`` is set to ``True`` or
    ``False``. If it's ``False``, then ``_authentication``
    aborts if the ``Context`` is not logged in, and raises an
    ``AuthenticationError`` if an ``HTTPError`` of status 401 is
    raised in *request_fun*. If it's ``True``, then
    ``_authentication`` will try at all sensible places to
    log in before issuing the request.
    If ``autologin`` is ``False``, ``_authentication`` makes
    one roundtrip to the server if the ``Context`` is logged in,
    or zero if it is not. If ``autologin`` is ``True``, it's less
    deterministic, and may make at most three roundtrips (though
    that would be a truly pathological case).
    :param request_fun: A function of no arguments encapsulating
        the request to make to the server.
    **Example**::
        import splunklib.binding as binding
        c = binding.connect(..., autologin=True)
        c.logout()
        def f():
            c.get("/services")
            return 42
        print _authentication(f)
    """
    @wraps(request_fun)
    def wrapper(self, *args, **kwargs):
        if self.token is _NoAuthenticationToken:
            # Not yet logged in.
            if self.autologin and self.username and self.password:
                # This will throw an uncaught
                # AuthenticationError if it fails.
                self.login()
            else:
                # Try the request anyway without authentication.
                # Most requests will fail. Some will succeed, such as
                # 'GET server/info'.
                # NOTE(review): _handle_auth_error presumably converts a 401
                # HTTPError into an AuthenticationError -- confirm at its def.
                with _handle_auth_error("Request aborted: not logged in."):
                    return request_fun(self, *args, **kwargs)
        try:
            # Issue the request
            return request_fun(self, *args, **kwargs)
        except HTTPError as he:
            if he.status == 401 and self.autologin:
                # Authentication failed. Try logging in, and then
                # rerunning the request. If either step fails, throw
                # an AuthenticationError and give up.
                with _handle_auth_error("Autologin failed."):
                    self.login()
                with _handle_auth_error(
                        "Autologin succeeded, but there was an auth error on "
                        "next request. Something is very wrong."):
                    return request_fun(self, *args, **kwargs)
            elif he.status == 401 and not self.autologin:
                raise AuthenticationError(
                    "Request failed: Session is not logged in.", he)
            else:
                # Non-auth HTTP errors propagate unchanged to the caller.
                raise
    # wraps() keeps request_fun's name/docstring on the returned wrapper.
    return wrapper
|
[
"def",
"_authentication",
"(",
"request_fun",
")",
":",
"@",
"wraps",
"(",
"request_fun",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"token",
"is",
"_NoAuthenticationToken",
":",
"# Not yet logged in.",
"if",
"self",
".",
"autologin",
"and",
"self",
".",
"username",
"and",
"self",
".",
"password",
":",
"# This will throw an uncaught",
"# AuthenticationError if it fails.",
"self",
".",
"login",
"(",
")",
"else",
":",
"# Try the request anyway without authentication.",
"# Most requests will fail. Some will succeed, such as",
"# 'GET server/info'.",
"with",
"_handle_auth_error",
"(",
"\"Request aborted: not logged in.\"",
")",
":",
"return",
"request_fun",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"# Issue the request",
"return",
"request_fun",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"HTTPError",
"as",
"he",
":",
"if",
"he",
".",
"status",
"==",
"401",
"and",
"self",
".",
"autologin",
":",
"# Authentication failed. Try logging in, and then",
"# rerunning the request. If either step fails, throw",
"# an AuthenticationError and give up.",
"with",
"_handle_auth_error",
"(",
"\"Autologin failed.\"",
")",
":",
"self",
".",
"login",
"(",
")",
"with",
"_handle_auth_error",
"(",
"\"Autologin succeeded, but there was an auth error on \"",
"\"next request. Something is very wrong.\"",
")",
":",
"return",
"request_fun",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"he",
".",
"status",
"==",
"401",
"and",
"not",
"self",
".",
"autologin",
":",
"raise",
"AuthenticationError",
"(",
"\"Request failed: Session is not logged in.\"",
",",
"he",
")",
"else",
":",
"raise",
"return",
"wrapper"
] |
Decorator to handle autologin and authentication errors.
*request_fun* is a function taking no arguments that needs to
be run with this ``Context`` logged into Splunk.
``_authentication``'s behavior depends on whether the
``autologin`` field of ``Context`` is set to ``True`` or
``False``. If it's ``False``, then ``_authentication``
aborts if the ``Context`` is not logged in, and raises an
``AuthenticationError`` if an ``HTTPError`` of status 401 is
raised in *request_fun*. If it's ``True``, then
``_authentication`` will try at all sensible places to
log in before issuing the request.
If ``autologin`` is ``False``, ``_authentication`` makes
one roundtrip to the server if the ``Context`` is logged in,
or zero if it is not. If ``autologin`` is ``True``, it's less
deterministic, and may make at most three roundtrips (though
that would be a truly pathological case).
:param request_fun: A function of no arguments encapsulating
the request to make to the server.
**Example**::
import splunklib.binding as binding
c = binding.connect(..., autologin=True)
c.logout()
def f():
c.get("/services")
return 42
print _authentication(f)
|
[
"Decorator",
"to",
"handle",
"autologin",
"and",
"authentication",
"errors",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py#L191-L259
|
238,509
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py
|
Context.get
|
def get(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.get('nonexistant/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("GET request to %s (body: %s)", path, repr(query))
response = self.http.get(path, self._auth_headers, **query)
return response
|
python
|
def get(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.get('nonexistant/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("GET request to %s (body: %s)", path, repr(query))
response = self.http.get(path, self._auth_headers, **query)
return response
|
[
"def",
"get",
"(",
"self",
",",
"path_segment",
",",
"owner",
"=",
"None",
",",
"app",
"=",
"None",
",",
"sharing",
"=",
"None",
",",
"*",
"*",
"query",
")",
":",
"path",
"=",
"self",
".",
"authority",
"+",
"self",
".",
"_abspath",
"(",
"path_segment",
",",
"owner",
"=",
"owner",
",",
"app",
"=",
"app",
",",
"sharing",
"=",
"sharing",
")",
"logging",
".",
"debug",
"(",
"\"GET request to %s (body: %s)\"",
",",
"path",
",",
"repr",
"(",
"query",
")",
")",
"response",
"=",
"self",
".",
"http",
".",
"get",
"(",
"path",
",",
"self",
".",
"_auth_headers",
",",
"*",
"*",
"query",
")",
"return",
"response"
] |
Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
c.get('nonexistant/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
|
[
"Performs",
"a",
"GET",
"operation",
"from",
"the",
"REST",
"path",
"segment",
"with",
"the",
"given",
"namespace",
"and",
"query",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py#L533-L587
|
238,510
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py
|
HttpLib.post
|
def post(self, url, headers=None, **kwargs):
"""Sends a POST request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). If the argument
is ``body``, the value is used as the body for the request, and the
keywords and their arguments will be URL encoded. If there is no
``body`` keyword argument, all the keyword arguments are encoded
into the body of the request in the format ``x-www-form-urlencoded``.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
headers.append(("Content-Type", "application/x-www-form-urlencoded")),
# We handle GET-style arguments and an unstructured body. This is here
# to support the receivers/stream endpoint.
if 'body' in kwargs:
body = kwargs.pop('body')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
body = _encode(**kwargs)
message = {
'method': "POST",
'headers': headers,
'body': body
}
return self.request(url, message)
|
python
|
def post(self, url, headers=None, **kwargs):
"""Sends a POST request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). If the argument
is ``body``, the value is used as the body for the request, and the
keywords and their arguments will be URL encoded. If there is no
``body`` keyword argument, all the keyword arguments are encoded
into the body of the request in the format ``x-www-form-urlencoded``.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
headers.append(("Content-Type", "application/x-www-form-urlencoded")),
# We handle GET-style arguments and an unstructured body. This is here
# to support the receivers/stream endpoint.
if 'body' in kwargs:
body = kwargs.pop('body')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
body = _encode(**kwargs)
message = {
'method': "POST",
'headers': headers,
'body': body
}
return self.request(url, message)
|
[
"def",
"post",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"[",
"]",
"headers",
".",
"append",
"(",
"(",
"\"Content-Type\"",
",",
"\"application/x-www-form-urlencoded\"",
")",
")",
",",
"# We handle GET-style arguments and an unstructured body. This is here",
"# to support the receivers/stream endpoint.",
"if",
"'body'",
"in",
"kwargs",
":",
"body",
"=",
"kwargs",
".",
"pop",
"(",
"'body'",
")",
"if",
"len",
"(",
"kwargs",
")",
">",
"0",
":",
"url",
"=",
"url",
"+",
"UrlEncoded",
"(",
"'?'",
"+",
"_encode",
"(",
"*",
"*",
"kwargs",
")",
",",
"skip_encode",
"=",
"True",
")",
"else",
":",
"body",
"=",
"_encode",
"(",
"*",
"*",
"kwargs",
")",
"message",
"=",
"{",
"'method'",
":",
"\"POST\"",
",",
"'headers'",
":",
"headers",
",",
"'body'",
":",
"body",
"}",
"return",
"self",
".",
"request",
"(",
"url",
",",
"message",
")"
] |
Sends a POST request to a URL.
:param url: The URL.
:type url: ``string``
:param headers: A list of pairs specifying the headers for the HTTP
response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). If the argument
is ``body``, the value is used as the body for the request, and the
keywords and their arguments will be URL encoded. If there is no
``body`` keyword argument, all the keyword arguments are encoded
into the body of the request in the format ``x-www-form-urlencoded``.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
|
[
"Sends",
"a",
"POST",
"request",
"to",
"a",
"URL",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/binding.py#L1060-L1093
|
238,511
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackWin.create_proxy_model
|
def create_proxy_model(self, model):
"""Create a sort filter proxy model for the given model
:param model: the model to wrap in a proxy
:type model: :class:`QtGui.QAbstractItemModel`
:returns: a new proxy model that can be used for sorting and filtering
:rtype: :class:`QtGui.QAbstractItemModel`
:raises: None
"""
proxy = ReftrackSortFilterModel(self)
proxy.setSourceModel(model)
model.rowsInserted.connect(self.sort_model)
return proxy
|
python
|
def create_proxy_model(self, model):
"""Create a sort filter proxy model for the given model
:param model: the model to wrap in a proxy
:type model: :class:`QtGui.QAbstractItemModel`
:returns: a new proxy model that can be used for sorting and filtering
:rtype: :class:`QtGui.QAbstractItemModel`
:raises: None
"""
proxy = ReftrackSortFilterModel(self)
proxy.setSourceModel(model)
model.rowsInserted.connect(self.sort_model)
return proxy
|
[
"def",
"create_proxy_model",
"(",
"self",
",",
"model",
")",
":",
"proxy",
"=",
"ReftrackSortFilterModel",
"(",
"self",
")",
"proxy",
".",
"setSourceModel",
"(",
"model",
")",
"model",
".",
"rowsInserted",
".",
"connect",
"(",
"self",
".",
"sort_model",
")",
"return",
"proxy"
] |
Create a sort filter proxy model for the given model
:param model: the model to wrap in a proxy
:type model: :class:`QtGui.QAbstractItemModel`
:returns: a new proxy model that can be used for sorting and filtering
:rtype: :class:`QtGui.QAbstractItemModel`
:raises: None
|
[
"Create",
"a",
"sort",
"filter",
"proxy",
"model",
"for",
"the",
"given",
"model"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L89-L101
|
238,512
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackWin.setup_filter
|
def setup_filter(self, ):
"""Create a checkbox for every reftrack type so one can filter them
:returns: None
:rtype: None
:raises: None
"""
types = self.refobjinter.types.keys()
for i, t in enumerate(types):
cb = QtGui.QCheckBox("%s" % t)
cb.setChecked(True)
cb.toggled.connect(self.update_filter)
self.typecbmap[t] = cb
self.typefilter_grid.addWidget(cb, int(i / 4), i % 4)
|
python
|
def setup_filter(self, ):
"""Create a checkbox for every reftrack type so one can filter them
:returns: None
:rtype: None
:raises: None
"""
types = self.refobjinter.types.keys()
for i, t in enumerate(types):
cb = QtGui.QCheckBox("%s" % t)
cb.setChecked(True)
cb.toggled.connect(self.update_filter)
self.typecbmap[t] = cb
self.typefilter_grid.addWidget(cb, int(i / 4), i % 4)
|
[
"def",
"setup_filter",
"(",
"self",
",",
")",
":",
"types",
"=",
"self",
".",
"refobjinter",
".",
"types",
".",
"keys",
"(",
")",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"types",
")",
":",
"cb",
"=",
"QtGui",
".",
"QCheckBox",
"(",
"\"%s\"",
"%",
"t",
")",
"cb",
".",
"setChecked",
"(",
"True",
")",
"cb",
".",
"toggled",
".",
"connect",
"(",
"self",
".",
"update_filter",
")",
"self",
".",
"typecbmap",
"[",
"t",
"]",
"=",
"cb",
"self",
".",
"typefilter_grid",
".",
"addWidget",
"(",
"cb",
",",
"int",
"(",
"i",
"/",
"4",
")",
",",
"i",
"%",
"4",
")"
] |
Create a checkbox for every reftrack type so one can filter them
:returns: None
:rtype: None
:raises: None
|
[
"Create",
"a",
"checkbox",
"for",
"every",
"reftrack",
"type",
"so",
"one",
"can",
"filter",
"them"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L127-L140
|
238,513
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackWin.switch_showfilter_icon
|
def switch_showfilter_icon(self, toggled):
"""Switch the icon on the showfilter_tb
:param toggled: the state of the button
:type toggled: :class:`bool`
:returns: None
:rtype: None
:raises: None
"""
at = QtCore.Qt.DownArrow if toggled else QtCore.Qt.RightArrow
self.showfilter_tb.setArrowType(at)
|
python
|
def switch_showfilter_icon(self, toggled):
"""Switch the icon on the showfilter_tb
:param toggled: the state of the button
:type toggled: :class:`bool`
:returns: None
:rtype: None
:raises: None
"""
at = QtCore.Qt.DownArrow if toggled else QtCore.Qt.RightArrow
self.showfilter_tb.setArrowType(at)
|
[
"def",
"switch_showfilter_icon",
"(",
"self",
",",
"toggled",
")",
":",
"at",
"=",
"QtCore",
".",
"Qt",
".",
"DownArrow",
"if",
"toggled",
"else",
"QtCore",
".",
"Qt",
".",
"RightArrow",
"self",
".",
"showfilter_tb",
".",
"setArrowType",
"(",
"at",
")"
] |
Switch the icon on the showfilter_tb
:param toggled: the state of the button
:type toggled: :class:`bool`
:returns: None
:rtype: None
:raises: None
|
[
"Switch",
"the",
"icon",
"on",
"the",
"showfilter_tb"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L142-L152
|
238,514
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackWin.open_addnew_win
|
def open_addnew_win(self, *args, **kwargs):
"""Open a new window so the use can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError
"""
if self.reftrackadderwin:
self.reftrackadderwin.close()
self.reftrackadderwin = ReftrackAdderWin(self.refobjinter, self.root, parent=self)
self.reftrackadderwin.destroyed.connect(self.addnewwin_destroyed)
self.reftrackadderwin.show()
|
python
|
def open_addnew_win(self, *args, **kwargs):
"""Open a new window so the use can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError
"""
if self.reftrackadderwin:
self.reftrackadderwin.close()
self.reftrackadderwin = ReftrackAdderWin(self.refobjinter, self.root, parent=self)
self.reftrackadderwin.destroyed.connect(self.addnewwin_destroyed)
self.reftrackadderwin.show()
|
[
"def",
"open_addnew_win",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"reftrackadderwin",
":",
"self",
".",
"reftrackadderwin",
".",
"close",
"(",
")",
"self",
".",
"reftrackadderwin",
"=",
"ReftrackAdderWin",
"(",
"self",
".",
"refobjinter",
",",
"self",
".",
"root",
",",
"parent",
"=",
"self",
")",
"self",
".",
"reftrackadderwin",
".",
"destroyed",
".",
"connect",
"(",
"self",
".",
"addnewwin_destroyed",
")",
"self",
".",
"reftrackadderwin",
".",
"show",
"(",
")"
] |
Open a new window so the use can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError
|
[
"Open",
"a",
"new",
"window",
"so",
"the",
"use",
"can",
"choose",
"to",
"add",
"new",
"reftracks"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L154-L165
|
238,515
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackWin.update_filter
|
def update_filter(self, *args, **kwargs):
"""Update the filter
:returns: None
:rtype: None
:raises: NotImplementedError
"""
forbidden_statuses = []
if not self.loaded_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.LOADED)
if not self.unloaded_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.UNLOADED)
if not self.imported_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.IMPORTED)
if not self.empty_checkb.isChecked():
forbidden_statuses.append(None)
self.proxy.set_forbidden_statuses(forbidden_statuses)
forbidden_types = []
for typ, cb in self.typecbmap.items():
if not cb.isChecked():
forbidden_types.append(typ)
self.proxy.set_forbidden_types(forbidden_types)
forbidden_uptodate = []
if not self.old_checkb.isChecked():
forbidden_uptodate.append(False)
if not self.newest_checkb.isChecked():
forbidden_uptodate.append(True)
self.proxy.set_forbidden_uptodate(forbidden_uptodate)
forbidden_alien = [] if self.alien_checkb.isChecked() else [True]
self.proxy.set_forbidden_alien(forbidden_alien)
self.proxy.setFilterWildcard(self.search_le.text())
|
python
|
def update_filter(self, *args, **kwargs):
"""Update the filter
:returns: None
:rtype: None
:raises: NotImplementedError
"""
forbidden_statuses = []
if not self.loaded_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.LOADED)
if not self.unloaded_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.UNLOADED)
if not self.imported_checkb.isChecked():
forbidden_statuses.append(reftrack.Reftrack.IMPORTED)
if not self.empty_checkb.isChecked():
forbidden_statuses.append(None)
self.proxy.set_forbidden_statuses(forbidden_statuses)
forbidden_types = []
for typ, cb in self.typecbmap.items():
if not cb.isChecked():
forbidden_types.append(typ)
self.proxy.set_forbidden_types(forbidden_types)
forbidden_uptodate = []
if not self.old_checkb.isChecked():
forbidden_uptodate.append(False)
if not self.newest_checkb.isChecked():
forbidden_uptodate.append(True)
self.proxy.set_forbidden_uptodate(forbidden_uptodate)
forbidden_alien = [] if self.alien_checkb.isChecked() else [True]
self.proxy.set_forbidden_alien(forbidden_alien)
self.proxy.setFilterWildcard(self.search_le.text())
|
[
"def",
"update_filter",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"forbidden_statuses",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"loaded_checkb",
".",
"isChecked",
"(",
")",
":",
"forbidden_statuses",
".",
"append",
"(",
"reftrack",
".",
"Reftrack",
".",
"LOADED",
")",
"if",
"not",
"self",
".",
"unloaded_checkb",
".",
"isChecked",
"(",
")",
":",
"forbidden_statuses",
".",
"append",
"(",
"reftrack",
".",
"Reftrack",
".",
"UNLOADED",
")",
"if",
"not",
"self",
".",
"imported_checkb",
".",
"isChecked",
"(",
")",
":",
"forbidden_statuses",
".",
"append",
"(",
"reftrack",
".",
"Reftrack",
".",
"IMPORTED",
")",
"if",
"not",
"self",
".",
"empty_checkb",
".",
"isChecked",
"(",
")",
":",
"forbidden_statuses",
".",
"append",
"(",
"None",
")",
"self",
".",
"proxy",
".",
"set_forbidden_statuses",
"(",
"forbidden_statuses",
")",
"forbidden_types",
"=",
"[",
"]",
"for",
"typ",
",",
"cb",
"in",
"self",
".",
"typecbmap",
".",
"items",
"(",
")",
":",
"if",
"not",
"cb",
".",
"isChecked",
"(",
")",
":",
"forbidden_types",
".",
"append",
"(",
"typ",
")",
"self",
".",
"proxy",
".",
"set_forbidden_types",
"(",
"forbidden_types",
")",
"forbidden_uptodate",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"old_checkb",
".",
"isChecked",
"(",
")",
":",
"forbidden_uptodate",
".",
"append",
"(",
"False",
")",
"if",
"not",
"self",
".",
"newest_checkb",
".",
"isChecked",
"(",
")",
":",
"forbidden_uptodate",
".",
"append",
"(",
"True",
")",
"self",
".",
"proxy",
".",
"set_forbidden_uptodate",
"(",
"forbidden_uptodate",
")",
"forbidden_alien",
"=",
"[",
"]",
"if",
"self",
".",
"alien_checkb",
".",
"isChecked",
"(",
")",
"else",
"[",
"True",
"]",
"self",
".",
"proxy",
".",
"set_forbidden_alien",
"(",
"forbidden_alien",
")",
"self",
".",
"proxy",
".",
"setFilterWildcard",
"(",
"self",
".",
"search_le",
".",
"text",
"(",
")",
")"
] |
Update the filter
:returns: None
:rtype: None
:raises: NotImplementedError
|
[
"Update",
"the",
"filter"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L176-L210
|
238,516
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackWin.sort_model
|
def sort_model(self, *args, **kwargs):
"""Sort the proxy model
:returns: None
:rtype: None
:raises: None
"""
self.proxy.sort(17) # sort the identifier
self.proxy.sort(2) # sort the element
self.proxy.sort(1) # sort the elementgrp
self.proxy.sort(0)
|
python
|
def sort_model(self, *args, **kwargs):
"""Sort the proxy model
:returns: None
:rtype: None
:raises: None
"""
self.proxy.sort(17) # sort the identifier
self.proxy.sort(2) # sort the element
self.proxy.sort(1) # sort the elementgrp
self.proxy.sort(0)
|
[
"def",
"sort_model",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"proxy",
".",
"sort",
"(",
"17",
")",
"# sort the identifier",
"self",
".",
"proxy",
".",
"sort",
"(",
"2",
")",
"# sort the element",
"self",
".",
"proxy",
".",
"sort",
"(",
"1",
")",
"# sort the elementgrp",
"self",
".",
"proxy",
".",
"sort",
"(",
"0",
")"
] |
Sort the proxy model
:returns: None
:rtype: None
:raises: None
|
[
"Sort",
"the",
"proxy",
"model"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L212-L222
|
238,517
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwin.py
|
ReftrackAdderWin.add_selected
|
def add_selected(self, ):
"""Create a new reftrack with the selected element and type and add it to the root.
:returns: None
:rtype: None
:raises: NotImplementedError
"""
browser = self.shot_browser if self.browser_tabw.currentIndex() == 1 else self.asset_browser
selelements = browser.selected_indexes(2)
if not selelements:
return
seltypes = browser.selected_indexes(3)
if not seltypes:
return
elementi = selelements[0]
typi = seltypes[0]
if not elementi.isValid() or not typi.isValid():
return
element = elementi.internalPointer().internal_data()
typ = typi.internalPointer().internal_data()[0]
reftrack.Reftrack(self.root, self.refobjinter, typ=typ, element=element)
|
python
|
def add_selected(self, ):
"""Create a new reftrack with the selected element and type and add it to the root.
:returns: None
:rtype: None
:raises: NotImplementedError
"""
browser = self.shot_browser if self.browser_tabw.currentIndex() == 1 else self.asset_browser
selelements = browser.selected_indexes(2)
if not selelements:
return
seltypes = browser.selected_indexes(3)
if not seltypes:
return
elementi = selelements[0]
typi = seltypes[0]
if not elementi.isValid() or not typi.isValid():
return
element = elementi.internalPointer().internal_data()
typ = typi.internalPointer().internal_data()[0]
reftrack.Reftrack(self.root, self.refobjinter, typ=typ, element=element)
|
[
"def",
"add_selected",
"(",
"self",
",",
")",
":",
"browser",
"=",
"self",
".",
"shot_browser",
"if",
"self",
".",
"browser_tabw",
".",
"currentIndex",
"(",
")",
"==",
"1",
"else",
"self",
".",
"asset_browser",
"selelements",
"=",
"browser",
".",
"selected_indexes",
"(",
"2",
")",
"if",
"not",
"selelements",
":",
"return",
"seltypes",
"=",
"browser",
".",
"selected_indexes",
"(",
"3",
")",
"if",
"not",
"seltypes",
":",
"return",
"elementi",
"=",
"selelements",
"[",
"0",
"]",
"typi",
"=",
"seltypes",
"[",
"0",
"]",
"if",
"not",
"elementi",
".",
"isValid",
"(",
")",
"or",
"not",
"typi",
".",
"isValid",
"(",
")",
":",
"return",
"element",
"=",
"elementi",
".",
"internalPointer",
"(",
")",
".",
"internal_data",
"(",
")",
"typ",
"=",
"typi",
".",
"internalPointer",
"(",
")",
".",
"internal_data",
"(",
")",
"[",
"0",
"]",
"reftrack",
".",
"Reftrack",
"(",
"self",
".",
"root",
",",
"self",
".",
"refobjinter",
",",
"typ",
"=",
"typ",
",",
"element",
"=",
"element",
")"
] |
Create a new reftrack with the selected element and type and add it to the root.
:returns: None
:rtype: None
:raises: NotImplementedError
|
[
"Create",
"a",
"new",
"reftrack",
"with",
"the",
"selected",
"element",
"and",
"type",
"and",
"add",
"it",
"to",
"the",
"root",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwin.py#L341-L362
|
238,518
|
rvswift/EB
|
EB/builder/utilities/output.py
|
write_ensemble
|
def write_ensemble(ensemble, options):
"""
Prints out the ensemble composition at each size
"""
# set output file name
size = len(ensemble)
filename = '%s_%s_queries.csv' % (options.outname, size)
file = os.path.join(os.getcwd(), filename)
f = open(file, 'w')
out = ', '.join(ensemble)
f.write(out)
f.close()
|
python
|
def write_ensemble(ensemble, options):
"""
Prints out the ensemble composition at each size
"""
# set output file name
size = len(ensemble)
filename = '%s_%s_queries.csv' % (options.outname, size)
file = os.path.join(os.getcwd(), filename)
f = open(file, 'w')
out = ', '.join(ensemble)
f.write(out)
f.close()
|
[
"def",
"write_ensemble",
"(",
"ensemble",
",",
"options",
")",
":",
"# set output file name",
"size",
"=",
"len",
"(",
"ensemble",
")",
"filename",
"=",
"'%s_%s_queries.csv'",
"%",
"(",
"options",
".",
"outname",
",",
"size",
")",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"filename",
")",
"f",
"=",
"open",
"(",
"file",
",",
"'w'",
")",
"out",
"=",
"', '",
".",
"join",
"(",
"ensemble",
")",
"f",
".",
"write",
"(",
"out",
")",
"f",
".",
"close",
"(",
")"
] |
Prints out the ensemble composition at each size
|
[
"Prints",
"out",
"the",
"ensemble",
"composition",
"at",
"each",
"size"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/output.py#L157-L173
|
238,519
|
siemens/django-dingos
|
dingos/templatetags/dingos_tags.py
|
lookup_blob
|
def lookup_blob(hash_value):
"""
Combines all given arguments to create clean title-tags values.
All arguments are divided by a " " seperator and HTML tags
are to be removed.
"""
try:
blob = BlobStorage.objects.get(sha256=hash_value)
except:
return "Blob not found"
return blob.content
|
python
|
def lookup_blob(hash_value):
"""
Combines all given arguments to create clean title-tags values.
All arguments are divided by a " " seperator and HTML tags
are to be removed.
"""
try:
blob = BlobStorage.objects.get(sha256=hash_value)
except:
return "Blob not found"
return blob.content
|
[
"def",
"lookup_blob",
"(",
"hash_value",
")",
":",
"try",
":",
"blob",
"=",
"BlobStorage",
".",
"objects",
".",
"get",
"(",
"sha256",
"=",
"hash_value",
")",
"except",
":",
"return",
"\"Blob not found\"",
"return",
"blob",
".",
"content"
] |
Combines all given arguments to create clean title-tags values.
All arguments are divided by a " " seperator and HTML tags
are to be removed.
|
[
"Combines",
"all",
"given",
"arguments",
"to",
"create",
"clean",
"title",
"-",
"tags",
"values",
".",
"All",
"arguments",
"are",
"divided",
"by",
"a",
"seperator",
"and",
"HTML",
"tags",
"are",
"to",
"be",
"removed",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/templatetags/dingos_tags.py#L241-L251
|
238,520
|
ericflo/hurricane
|
hurricane/handlers/comet/handler.py
|
CometHandler.comet_view
|
def comet_view(self, request):
"""
This is dumb function, it just passes everything it gets into the
message stream. Something else in the stream should be responsible
for asynchronously figuring out what to do with all these messages.
"""
request_id = self.id_for_request(request)
if not request_id:
request.write(HttpResponse(403).as_bytes())
request.finish()
return
data = {
'headers': request.headers,
'arguments': request.arguments,
'remote_ip': request.remote_ip,
'request_id': request_id,
}
message_kind = 'comet-%s' % (request.method,)
if request.method == 'POST':
data['body'] = simplejson.loads(request.body)
request.write(HttpResponse(201).as_bytes())
request.finish()
else:
self.pending_requests[request_id].append(request)
self.publish(Message(message_kind, datetime.now(), data))
|
python
|
def comet_view(self, request):
"""
This is dumb function, it just passes everything it gets into the
message stream. Something else in the stream should be responsible
for asynchronously figuring out what to do with all these messages.
"""
request_id = self.id_for_request(request)
if not request_id:
request.write(HttpResponse(403).as_bytes())
request.finish()
return
data = {
'headers': request.headers,
'arguments': request.arguments,
'remote_ip': request.remote_ip,
'request_id': request_id,
}
message_kind = 'comet-%s' % (request.method,)
if request.method == 'POST':
data['body'] = simplejson.loads(request.body)
request.write(HttpResponse(201).as_bytes())
request.finish()
else:
self.pending_requests[request_id].append(request)
self.publish(Message(message_kind, datetime.now(), data))
|
[
"def",
"comet_view",
"(",
"self",
",",
"request",
")",
":",
"request_id",
"=",
"self",
".",
"id_for_request",
"(",
"request",
")",
"if",
"not",
"request_id",
":",
"request",
".",
"write",
"(",
"HttpResponse",
"(",
"403",
")",
".",
"as_bytes",
"(",
")",
")",
"request",
".",
"finish",
"(",
")",
"return",
"data",
"=",
"{",
"'headers'",
":",
"request",
".",
"headers",
",",
"'arguments'",
":",
"request",
".",
"arguments",
",",
"'remote_ip'",
":",
"request",
".",
"remote_ip",
",",
"'request_id'",
":",
"request_id",
",",
"}",
"message_kind",
"=",
"'comet-%s'",
"%",
"(",
"request",
".",
"method",
",",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"data",
"[",
"'body'",
"]",
"=",
"simplejson",
".",
"loads",
"(",
"request",
".",
"body",
")",
"request",
".",
"write",
"(",
"HttpResponse",
"(",
"201",
")",
".",
"as_bytes",
"(",
")",
")",
"request",
".",
"finish",
"(",
")",
"else",
":",
"self",
".",
"pending_requests",
"[",
"request_id",
"]",
".",
"append",
"(",
"request",
")",
"self",
".",
"publish",
"(",
"Message",
"(",
"message_kind",
",",
"datetime",
".",
"now",
"(",
")",
",",
"data",
")",
")"
] |
This is dumb function, it just passes everything it gets into the
message stream. Something else in the stream should be responsible
for asynchronously figuring out what to do with all these messages.
|
[
"This",
"is",
"dumb",
"function",
"it",
"just",
"passes",
"everything",
"it",
"gets",
"into",
"the",
"message",
"stream",
".",
"Something",
"else",
"in",
"the",
"stream",
"should",
"be",
"responsible",
"for",
"asynchronously",
"figuring",
"out",
"what",
"to",
"do",
"with",
"all",
"these",
"messages",
"."
] |
c192b711b2b1c06a386d1a1a47f538b13a659cde
|
https://github.com/ericflo/hurricane/blob/c192b711b2b1c06a386d1a1a47f538b13a659cde/hurricane/handlers/comet/handler.py#L90-L115
|
238,521
|
mlavin/argyle
|
argyle/base.py
|
sshagent_run
|
def sshagent_run(cmd):
"""
Helper function.
Runs a command with SSH agent forwarding enabled.
Note:: Fabric (and paramiko) can't forward your SSH agent.
This helper uses your system's ssh to do so.
"""
# Handle context manager modifications
wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote')
try:
host, port = env.host_string.split(':')
return local(
u"ssh -p %s -A -o StrictHostKeyChecking=no %s@%s '%s'" % (
port, env.user, host, wrapped_cmd
)
)
except ValueError:
return local(
u"ssh -A -o StrictHostKeyChecking=no %s@%s '%s'" % (
env.user, env.host_string, wrapped_cmd
)
)
|
python
|
def sshagent_run(cmd):
"""
Helper function.
Runs a command with SSH agent forwarding enabled.
Note:: Fabric (and paramiko) can't forward your SSH agent.
This helper uses your system's ssh to do so.
"""
# Handle context manager modifications
wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote')
try:
host, port = env.host_string.split(':')
return local(
u"ssh -p %s -A -o StrictHostKeyChecking=no %s@%s '%s'" % (
port, env.user, host, wrapped_cmd
)
)
except ValueError:
return local(
u"ssh -A -o StrictHostKeyChecking=no %s@%s '%s'" % (
env.user, env.host_string, wrapped_cmd
)
)
|
[
"def",
"sshagent_run",
"(",
"cmd",
")",
":",
"# Handle context manager modifications",
"wrapped_cmd",
"=",
"_prefix_commands",
"(",
"_prefix_env_vars",
"(",
"cmd",
")",
",",
"'remote'",
")",
"try",
":",
"host",
",",
"port",
"=",
"env",
".",
"host_string",
".",
"split",
"(",
"':'",
")",
"return",
"local",
"(",
"u\"ssh -p %s -A -o StrictHostKeyChecking=no %s@%s '%s'\"",
"%",
"(",
"port",
",",
"env",
".",
"user",
",",
"host",
",",
"wrapped_cmd",
")",
")",
"except",
"ValueError",
":",
"return",
"local",
"(",
"u\"ssh -A -o StrictHostKeyChecking=no %s@%s '%s'\"",
"%",
"(",
"env",
".",
"user",
",",
"env",
".",
"host_string",
",",
"wrapped_cmd",
")",
")"
] |
Helper function.
Runs a command with SSH agent forwarding enabled.
Note:: Fabric (and paramiko) can't forward your SSH agent.
This helper uses your system's ssh to do so.
|
[
"Helper",
"function",
".",
"Runs",
"a",
"command",
"with",
"SSH",
"agent",
"forwarding",
"enabled",
"."
] |
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
|
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/base.py#L10-L32
|
238,522
|
steenzout/python-serialization-json
|
steenzout/serialization/json/encoders.py
|
as_object
|
def as_object(obj):
"""Return a JSON serializable type for ``o``.
Args:
obj (:py:class:`object`): the object to be serialized.
Raises:
:py:class:`AttributeError`:
when ``o`` is not a Python object.
Returns:
(dict): JSON serializable type for the given object.
"""
LOGGER.debug('as_object(%s)', obj)
if isinstance(obj, datetime.date):
return as_date(obj)
elif hasattr(obj, '__dict__'):
# populate dict with visible attributes
out = {k: obj.__dict__[k] for k in obj.__dict__ if not k.startswith('_')}
# populate dict with property names and values
for k, v in (
(p, getattr(obj, p))
for p, _ in inspect.getmembers(
obj.__class__,
lambda x: isinstance(x, property))
):
out[k] = v
return out
|
python
|
def as_object(obj):
"""Return a JSON serializable type for ``o``.
Args:
obj (:py:class:`object`): the object to be serialized.
Raises:
:py:class:`AttributeError`:
when ``o`` is not a Python object.
Returns:
(dict): JSON serializable type for the given object.
"""
LOGGER.debug('as_object(%s)', obj)
if isinstance(obj, datetime.date):
return as_date(obj)
elif hasattr(obj, '__dict__'):
# populate dict with visible attributes
out = {k: obj.__dict__[k] for k in obj.__dict__ if not k.startswith('_')}
# populate dict with property names and values
for k, v in (
(p, getattr(obj, p))
for p, _ in inspect.getmembers(
obj.__class__,
lambda x: isinstance(x, property))
):
out[k] = v
return out
|
[
"def",
"as_object",
"(",
"obj",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'as_object(%s)'",
",",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"datetime",
".",
"date",
")",
":",
"return",
"as_date",
"(",
"obj",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"'__dict__'",
")",
":",
"# populate dict with visible attributes",
"out",
"=",
"{",
"k",
":",
"obj",
".",
"__dict__",
"[",
"k",
"]",
"for",
"k",
"in",
"obj",
".",
"__dict__",
"if",
"not",
"k",
".",
"startswith",
"(",
"'_'",
")",
"}",
"# populate dict with property names and values",
"for",
"k",
",",
"v",
"in",
"(",
"(",
"p",
",",
"getattr",
"(",
"obj",
",",
"p",
")",
")",
"for",
"p",
",",
"_",
"in",
"inspect",
".",
"getmembers",
"(",
"obj",
".",
"__class__",
",",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"property",
")",
")",
")",
":",
"out",
"[",
"k",
"]",
"=",
"v",
"return",
"out"
] |
Return a JSON serializable type for ``o``.
Args:
obj (:py:class:`object`): the object to be serialized.
Raises:
:py:class:`AttributeError`:
when ``o`` is not a Python object.
Returns:
(dict): JSON serializable type for the given object.
|
[
"Return",
"a",
"JSON",
"serializable",
"type",
"for",
"o",
"."
] |
583568e14cc02ba0bf711f56b8a0a3ad142c696d
|
https://github.com/steenzout/python-serialization-json/blob/583568e14cc02ba0bf711f56b8a0a3ad142c696d/steenzout/serialization/json/encoders.py#L29-L61
|
238,523
|
steenzout/python-serialization-json
|
steenzout/serialization/json/encoders.py
|
as_date
|
def as_date(dat):
"""Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``o`` is not an instance of ``datetime.date``.
Returns:
(str) JSON serializable type for the given object.
"""
LOGGER.debug('as_date(%s)', dat)
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
calendar.timegm(dat.timetuple()))
|
python
|
def as_date(dat):
"""Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``o`` is not an instance of ``datetime.date``.
Returns:
(str) JSON serializable type for the given object.
"""
LOGGER.debug('as_date(%s)', dat)
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
calendar.timegm(dat.timetuple()))
|
[
"def",
"as_date",
"(",
"dat",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'as_date(%s)'",
",",
"dat",
")",
"return",
"strict_rfc3339",
".",
"timestamp_to_rfc3339_utcoffset",
"(",
"calendar",
".",
"timegm",
"(",
"dat",
".",
"timetuple",
"(",
")",
")",
")"
] |
Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``o`` is not an instance of ``datetime.date``.
Returns:
(str) JSON serializable type for the given object.
|
[
"Return",
"the",
"RFC3339",
"UTC",
"string",
"representation",
"of",
"the",
"given",
"date",
"and",
"time",
"."
] |
583568e14cc02ba0bf711f56b8a0a3ad142c696d
|
https://github.com/steenzout/python-serialization-json/blob/583568e14cc02ba0bf711f56b8a0a3ad142c696d/steenzout/serialization/json/encoders.py#L64-L80
|
238,524
|
mrstephenneal/looptools
|
looptools/chunks.py
|
chunks
|
def chunks(iterable, chunk):
"""Yield successive n-sized chunks from an iterable."""
for i in range(0, len(iterable), chunk):
yield iterable[i:i + chunk]
|
python
|
def chunks(iterable, chunk):
"""Yield successive n-sized chunks from an iterable."""
for i in range(0, len(iterable), chunk):
yield iterable[i:i + chunk]
|
[
"def",
"chunks",
"(",
"iterable",
",",
"chunk",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"iterable",
")",
",",
"chunk",
")",
":",
"yield",
"iterable",
"[",
"i",
":",
"i",
"+",
"chunk",
"]"
] |
Yield successive n-sized chunks from an iterable.
|
[
"Yield",
"successive",
"n",
"-",
"sized",
"chunks",
"from",
"an",
"iterable",
"."
] |
c4ef88d78e0fb672d09a18de0aa0edd31fd4db72
|
https://github.com/mrstephenneal/looptools/blob/c4ef88d78e0fb672d09a18de0aa0edd31fd4db72/looptools/chunks.py#L1-L4
|
238,525
|
pbrisk/mathtoolspy
|
mathtoolspy/integration/simplex_integrator.py
|
SimplexIntegrator.integrate
|
def integrate(self, function, lower_bound, upper_bound):
"""
Calculates the integral of the given one dimensional function
in the interval from lower_bound to upper_bound, with the simplex integration method.
"""
ret = 0.0
n = self.nsteps
xStep = (float(upper_bound) - float(lower_bound)) / float(n)
self.log_info("xStep" + str(xStep))
x = lower_bound
val1 = function(x)
self.log_info("val1: " + str(val1))
for i in range(n):
x = (i + 1) * xStep + lower_bound
self.log_info("x: " + str(x))
val2 = function(x)
self.log_info("val2: " + str(val2))
ret += 0.5 * xStep * (val1 + val2)
val1 = val2
return ret
|
python
|
def integrate(self, function, lower_bound, upper_bound):
"""
Calculates the integral of the given one dimensional function
in the interval from lower_bound to upper_bound, with the simplex integration method.
"""
ret = 0.0
n = self.nsteps
xStep = (float(upper_bound) - float(lower_bound)) / float(n)
self.log_info("xStep" + str(xStep))
x = lower_bound
val1 = function(x)
self.log_info("val1: " + str(val1))
for i in range(n):
x = (i + 1) * xStep + lower_bound
self.log_info("x: " + str(x))
val2 = function(x)
self.log_info("val2: " + str(val2))
ret += 0.5 * xStep * (val1 + val2)
val1 = val2
return ret
|
[
"def",
"integrate",
"(",
"self",
",",
"function",
",",
"lower_bound",
",",
"upper_bound",
")",
":",
"ret",
"=",
"0.0",
"n",
"=",
"self",
".",
"nsteps",
"xStep",
"=",
"(",
"float",
"(",
"upper_bound",
")",
"-",
"float",
"(",
"lower_bound",
")",
")",
"/",
"float",
"(",
"n",
")",
"self",
".",
"log_info",
"(",
"\"xStep\"",
"+",
"str",
"(",
"xStep",
")",
")",
"x",
"=",
"lower_bound",
"val1",
"=",
"function",
"(",
"x",
")",
"self",
".",
"log_info",
"(",
"\"val1: \"",
"+",
"str",
"(",
"val1",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"x",
"=",
"(",
"i",
"+",
"1",
")",
"*",
"xStep",
"+",
"lower_bound",
"self",
".",
"log_info",
"(",
"\"x: \"",
"+",
"str",
"(",
"x",
")",
")",
"val2",
"=",
"function",
"(",
"x",
")",
"self",
".",
"log_info",
"(",
"\"val2: \"",
"+",
"str",
"(",
"val2",
")",
")",
"ret",
"+=",
"0.5",
"*",
"xStep",
"*",
"(",
"val1",
"+",
"val2",
")",
"val1",
"=",
"val2",
"return",
"ret"
] |
Calculates the integral of the given one dimensional function
in the interval from lower_bound to upper_bound, with the simplex integration method.
|
[
"Calculates",
"the",
"integral",
"of",
"the",
"given",
"one",
"dimensional",
"function",
"in",
"the",
"interval",
"from",
"lower_bound",
"to",
"upper_bound",
"with",
"the",
"simplex",
"integration",
"method",
"."
] |
d0d35b45d20f346ba8a755e53ed0aa182fab43dd
|
https://github.com/pbrisk/mathtoolspy/blob/d0d35b45d20f346ba8a755e53ed0aa182fab43dd/mathtoolspy/integration/simplex_integrator.py#L25-L44
|
238,526
|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/sam.py
|
sam_iter
|
def sam_iter(handle, start_line=None, headers=False):
"""Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
it returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers options is True then yields GamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
"""
# Speed tricks: reduces function calls
split = str.split
strip = str.strip
next_line = next
if start_line is None:
line = next_line(handle) # Read first B6/M8 entry
else:
line = start_line # Set header to given header
# Check if input is text or bytestream
if (isinstance(line, bytes)):
def next_line(i):
return next(i).decode('utf-8')
line = strip(line.decode('utf-8'))
else:
line = strip(line)
# A manual 'for' loop isn't needed to read the file properly and quickly,
# unlike fasta_iter and fastq_iter, but it is necessary begin iterating
# partway through a file when the user gives a starting line.
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
split_line = split(line, '\t')
if line.startswith('@') and not headers:
line = strip(next_line(handle))
continue
elif line.startswith('@') and headers:
yield line
line = strip(next_line(handle))
continue
data = SamEntry()
data.qname = split_line[0]
try: # Differentiate between int and hex bit flags
data.flag = int(split_line[1])
except ValueError:
data.flag = split_line[1]
data.rname = split_line[2]
data.pos = int(split_line[3])
data.mapq = int(split_line[4])
data.cigar = split_line[5]
data.rnext = split_line[6]
data.pnext = int(split_line[7])
data.tlen = int(split_line[8])
data.seq = split_line[9]
data.qual = split_line[10]
line = strip(next_line(handle)) # Raises StopIteration at EOF
yield data
except StopIteration: # Yield last SAM entry
yield data
|
python
|
def sam_iter(handle, start_line=None, headers=False):
"""Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
it returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers options is True then yields GamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
"""
# Speed tricks: reduces function calls
split = str.split
strip = str.strip
next_line = next
if start_line is None:
line = next_line(handle) # Read first B6/M8 entry
else:
line = start_line # Set header to given header
# Check if input is text or bytestream
if (isinstance(line, bytes)):
def next_line(i):
return next(i).decode('utf-8')
line = strip(line.decode('utf-8'))
else:
line = strip(line)
# A manual 'for' loop isn't needed to read the file properly and quickly,
# unlike fasta_iter and fastq_iter, but it is necessary begin iterating
# partway through a file when the user gives a starting line.
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
split_line = split(line, '\t')
if line.startswith('@') and not headers:
line = strip(next_line(handle))
continue
elif line.startswith('@') and headers:
yield line
line = strip(next_line(handle))
continue
data = SamEntry()
data.qname = split_line[0]
try: # Differentiate between int and hex bit flags
data.flag = int(split_line[1])
except ValueError:
data.flag = split_line[1]
data.rname = split_line[2]
data.pos = int(split_line[3])
data.mapq = int(split_line[4])
data.cigar = split_line[5]
data.rnext = split_line[6]
data.pnext = int(split_line[7])
data.tlen = int(split_line[8])
data.seq = split_line[9]
data.qual = split_line[10]
line = strip(next_line(handle)) # Raises StopIteration at EOF
yield data
except StopIteration: # Yield last SAM entry
yield data
|
[
"def",
"sam_iter",
"(",
"handle",
",",
"start_line",
"=",
"None",
",",
"headers",
"=",
"False",
")",
":",
"# Speed tricks: reduces function calls",
"split",
"=",
"str",
".",
"split",
"strip",
"=",
"str",
".",
"strip",
"next_line",
"=",
"next",
"if",
"start_line",
"is",
"None",
":",
"line",
"=",
"next_line",
"(",
"handle",
")",
"# Read first B6/M8 entry",
"else",
":",
"line",
"=",
"start_line",
"# Set header to given header",
"# Check if input is text or bytestream",
"if",
"(",
"isinstance",
"(",
"line",
",",
"bytes",
")",
")",
":",
"def",
"next_line",
"(",
"i",
")",
":",
"return",
"next",
"(",
"i",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"line",
"=",
"strip",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"line",
"=",
"strip",
"(",
"line",
")",
"# A manual 'for' loop isn't needed to read the file properly and quickly,",
"# unlike fasta_iter and fastq_iter, but it is necessary begin iterating",
"# partway through a file when the user gives a starting line.",
"try",
":",
"# Manually construct a for loop to improve speed by using 'next'",
"while",
"True",
":",
"# Loop until StopIteration Exception raised",
"split_line",
"=",
"split",
"(",
"line",
",",
"'\\t'",
")",
"if",
"line",
".",
"startswith",
"(",
"'@'",
")",
"and",
"not",
"headers",
":",
"line",
"=",
"strip",
"(",
"next_line",
"(",
"handle",
")",
")",
"continue",
"elif",
"line",
".",
"startswith",
"(",
"'@'",
")",
"and",
"headers",
":",
"yield",
"line",
"line",
"=",
"strip",
"(",
"next_line",
"(",
"handle",
")",
")",
"continue",
"data",
"=",
"SamEntry",
"(",
")",
"data",
".",
"qname",
"=",
"split_line",
"[",
"0",
"]",
"try",
":",
"# Differentiate between int and hex bit flags",
"data",
".",
"flag",
"=",
"int",
"(",
"split_line",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"data",
".",
"flag",
"=",
"split_line",
"[",
"1",
"]",
"data",
".",
"rname",
"=",
"split_line",
"[",
"2",
"]",
"data",
".",
"pos",
"=",
"int",
"(",
"split_line",
"[",
"3",
"]",
")",
"data",
".",
"mapq",
"=",
"int",
"(",
"split_line",
"[",
"4",
"]",
")",
"data",
".",
"cigar",
"=",
"split_line",
"[",
"5",
"]",
"data",
".",
"rnext",
"=",
"split_line",
"[",
"6",
"]",
"data",
".",
"pnext",
"=",
"int",
"(",
"split_line",
"[",
"7",
"]",
")",
"data",
".",
"tlen",
"=",
"int",
"(",
"split_line",
"[",
"8",
"]",
")",
"data",
".",
"seq",
"=",
"split_line",
"[",
"9",
"]",
"data",
".",
"qual",
"=",
"split_line",
"[",
"10",
"]",
"line",
"=",
"strip",
"(",
"next_line",
"(",
"handle",
")",
")",
"# Raises StopIteration at EOF",
"yield",
"data",
"except",
"StopIteration",
":",
"# Yield last SAM entry",
"yield",
"data"
] |
Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
it returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers options is True then yields GamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
|
[
"Iterate",
"over",
"SAM",
"file",
"and",
"return",
"SAM",
"entries"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/iterators/sam.py#L104-L219
|
238,527
|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/sam.py
|
SamEntry.write
|
def write(self):
"""Return SAM formatted string
Returns:
str: SAM formatted string containing entire SAM entry
"""
return '{0}\t{1}\t{2}\t{3}\t{4}\t' \
'{5}\t{6}\t{7}\t{8}\t{9}\t' \
'{10}{11}'.format(self.qname,
str(self.flag),
self.rname,
str(self.pos),
str(self.mapq),
self.cigar,
self.rnext,
str(self.pnext),
str(self.tlen),
self.seq,
self.qual,
os.linesep)
|
python
|
def write(self):
"""Return SAM formatted string
Returns:
str: SAM formatted string containing entire SAM entry
"""
return '{0}\t{1}\t{2}\t{3}\t{4}\t' \
'{5}\t{6}\t{7}\t{8}\t{9}\t' \
'{10}{11}'.format(self.qname,
str(self.flag),
self.rname,
str(self.pos),
str(self.mapq),
self.cigar,
self.rnext,
str(self.pnext),
str(self.tlen),
self.seq,
self.qual,
os.linesep)
|
[
"def",
"write",
"(",
"self",
")",
":",
"return",
"'{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t'",
"'{5}\\t{6}\\t{7}\\t{8}\\t{9}\\t'",
"'{10}{11}'",
".",
"format",
"(",
"self",
".",
"qname",
",",
"str",
"(",
"self",
".",
"flag",
")",
",",
"self",
".",
"rname",
",",
"str",
"(",
"self",
".",
"pos",
")",
",",
"str",
"(",
"self",
".",
"mapq",
")",
",",
"self",
".",
"cigar",
",",
"self",
".",
"rnext",
",",
"str",
"(",
"self",
".",
"pnext",
")",
",",
"str",
"(",
"self",
".",
"tlen",
")",
",",
"self",
".",
"seq",
",",
"self",
".",
"qual",
",",
"os",
".",
"linesep",
")"
] |
Return SAM formatted string
Returns:
str: SAM formatted string containing entire SAM entry
|
[
"Return",
"SAM",
"formatted",
"string"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/iterators/sam.py#L81-L101
|
238,528
|
fiesta/fiesta-python
|
fiesta/fiesta.py
|
FiestaAPI.request
|
def request(self, request_path, data=None, do_authentication=True, is_json=True):
"""
Core "worker" for making requests and parsing JSON responses.
If `is_json` is ``True``, `data` should be a dictionary which
will be JSON-encoded.
"""
uri = self.api_uri % request_path
request = urllib2.Request(uri)
# Build up the request
if is_json:
request.add_header("Content-Type", "application/json")
if data is not None:
request.add_data(json.dumps(data))
elif data is not None:
request.add_data(data)
if do_authentication:
if self.client_id is None or self.client_secret is None:
raise Exception(u"You need to supply a client_id and client_secret to perform an authenticated request")
basic_auth = base64.b64encode("%s:%s" % (self.client_id, self.client_secret))
request.add_header("Authorization", "Basic %s" % basic_auth)
try:
response = self._make_request(request)
except Exception as inst:
raise # automatically re-raises the exception
if 'status' in response:
# Grab the status info if it exists
self._last_status_code = response['status']['code']
if 'message' in response['status']:
self._last_status_message = response['status']['message']
if 'data' in response:
return response['data']
return response
|
python
|
def request(self, request_path, data=None, do_authentication=True, is_json=True):
"""
Core "worker" for making requests and parsing JSON responses.
If `is_json` is ``True``, `data` should be a dictionary which
will be JSON-encoded.
"""
uri = self.api_uri % request_path
request = urllib2.Request(uri)
# Build up the request
if is_json:
request.add_header("Content-Type", "application/json")
if data is not None:
request.add_data(json.dumps(data))
elif data is not None:
request.add_data(data)
if do_authentication:
if self.client_id is None or self.client_secret is None:
raise Exception(u"You need to supply a client_id and client_secret to perform an authenticated request")
basic_auth = base64.b64encode("%s:%s" % (self.client_id, self.client_secret))
request.add_header("Authorization", "Basic %s" % basic_auth)
try:
response = self._make_request(request)
except Exception as inst:
raise # automatically re-raises the exception
if 'status' in response:
# Grab the status info if it exists
self._last_status_code = response['status']['code']
if 'message' in response['status']:
self._last_status_message = response['status']['message']
if 'data' in response:
return response['data']
return response
|
[
"def",
"request",
"(",
"self",
",",
"request_path",
",",
"data",
"=",
"None",
",",
"do_authentication",
"=",
"True",
",",
"is_json",
"=",
"True",
")",
":",
"uri",
"=",
"self",
".",
"api_uri",
"%",
"request_path",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"uri",
")",
"# Build up the request",
"if",
"is_json",
":",
"request",
".",
"add_header",
"(",
"\"Content-Type\"",
",",
"\"application/json\"",
")",
"if",
"data",
"is",
"not",
"None",
":",
"request",
".",
"add_data",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"elif",
"data",
"is",
"not",
"None",
":",
"request",
".",
"add_data",
"(",
"data",
")",
"if",
"do_authentication",
":",
"if",
"self",
".",
"client_id",
"is",
"None",
"or",
"self",
".",
"client_secret",
"is",
"None",
":",
"raise",
"Exception",
"(",
"u\"You need to supply a client_id and client_secret to perform an authenticated request\"",
")",
"basic_auth",
"=",
"base64",
".",
"b64encode",
"(",
"\"%s:%s\"",
"%",
"(",
"self",
".",
"client_id",
",",
"self",
".",
"client_secret",
")",
")",
"request",
".",
"add_header",
"(",
"\"Authorization\"",
",",
"\"Basic %s\"",
"%",
"basic_auth",
")",
"try",
":",
"response",
"=",
"self",
".",
"_make_request",
"(",
"request",
")",
"except",
"Exception",
"as",
"inst",
":",
"raise",
"# automatically re-raises the exception",
"if",
"'status'",
"in",
"response",
":",
"# Grab the status info if it exists",
"self",
".",
"_last_status_code",
"=",
"response",
"[",
"'status'",
"]",
"[",
"'code'",
"]",
"if",
"'message'",
"in",
"response",
"[",
"'status'",
"]",
":",
"self",
".",
"_last_status_message",
"=",
"response",
"[",
"'status'",
"]",
"[",
"'message'",
"]",
"if",
"'data'",
"in",
"response",
":",
"return",
"response",
"[",
"'data'",
"]",
"return",
"response"
] |
Core "worker" for making requests and parsing JSON responses.
If `is_json` is ``True``, `data` should be a dictionary which
will be JSON-encoded.
|
[
"Core",
"worker",
"for",
"making",
"requests",
"and",
"parsing",
"JSON",
"responses",
"."
] |
cfcc11e4ae4c76b1007794604c33dde877f62cfb
|
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L52-L90
|
238,529
|
fiesta/fiesta-python
|
fiesta/fiesta.py
|
FiestaAPI._make_request
|
def _make_request(self, request):
"""
Does the magic of actually sending the request and parsing the response
"""
# TODO: I'm sure all kinds of error checking needs to go here
try:
response_raw = urllib2.urlopen(request)
except urllib2.HTTPError, e:
print e.read()
raise
response_str = response_raw.read()
response = json.loads(response_str)
self._last_request = request
self._last_response = response_raw
self._last_response_str = response_str
return response
|
python
|
def _make_request(self, request):
"""
Does the magic of actually sending the request and parsing the response
"""
# TODO: I'm sure all kinds of error checking needs to go here
try:
response_raw = urllib2.urlopen(request)
except urllib2.HTTPError, e:
print e.read()
raise
response_str = response_raw.read()
response = json.loads(response_str)
self._last_request = request
self._last_response = response_raw
self._last_response_str = response_str
return response
|
[
"def",
"_make_request",
"(",
"self",
",",
"request",
")",
":",
"# TODO: I'm sure all kinds of error checking needs to go here",
"try",
":",
"response_raw",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
")",
"except",
"urllib2",
".",
"HTTPError",
",",
"e",
":",
"print",
"e",
".",
"read",
"(",
")",
"raise",
"response_str",
"=",
"response_raw",
".",
"read",
"(",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"response_str",
")",
"self",
".",
"_last_request",
"=",
"request",
"self",
".",
"_last_response",
"=",
"response_raw",
"self",
".",
"_last_response_str",
"=",
"response_str",
"return",
"response"
] |
Does the magic of actually sending the request and parsing the response
|
[
"Does",
"the",
"magic",
"of",
"actually",
"sending",
"the",
"request",
"and",
"parsing",
"the",
"response"
] |
cfcc11e4ae4c76b1007794604c33dde877f62cfb
|
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L92-L109
|
238,530
|
fiesta/fiesta-python
|
fiesta/fiesta.py
|
FiestaGroup.add_member
|
def add_member(self, address, **kwargs):
"""
Add a member to a group.
All Fiesta membership options can be passed in as keyword
arguments. Some valid options include:
- `group_name`: Since each member can access a group using
their own name, you can override the `group_name` in this
method. By default, the group will have the name specified
on the class level `default_name` property.
- `display_name` is the full name of the user that they will
see throughout the UI if this is a new account.
- `welcome_message` should be a dictionary specified according
to the docs. If you set it to ``False``, no message will be
sent. See
http://docs.fiesta.cc/list-management-api.html#message for
formatting details.
.. seealso:: `Fiesta API documentation <http://docs.fiesta.cc/list-management-api.html#adding-members>`_
"""
path = 'membership/%s' % self.id
kwargs["address"] = address
if "group_name" not in kwargs and self.default_name:
kwargs["group_name"] = self.default_name
response_data = self.api.request(path, kwargs)
if 'user_id' in response_data:
user_id = response_data['user_id']
return FiestaUser(user_id, address=address, groups=[self])
return None
|
python
|
def add_member(self, address, **kwargs):
"""
Add a member to a group.
All Fiesta membership options can be passed in as keyword
arguments. Some valid options include:
- `group_name`: Since each member can access a group using
their own name, you can override the `group_name` in this
method. By default, the group will have the name specified
on the class level `default_name` property.
- `display_name` is the full name of the user that they will
see throughout the UI if this is a new account.
- `welcome_message` should be a dictionary specified according
to the docs. If you set it to ``False``, no message will be
sent. See
http://docs.fiesta.cc/list-management-api.html#message for
formatting details.
.. seealso:: `Fiesta API documentation <http://docs.fiesta.cc/list-management-api.html#adding-members>`_
"""
path = 'membership/%s' % self.id
kwargs["address"] = address
if "group_name" not in kwargs and self.default_name:
kwargs["group_name"] = self.default_name
response_data = self.api.request(path, kwargs)
if 'user_id' in response_data:
user_id = response_data['user_id']
return FiestaUser(user_id, address=address, groups=[self])
return None
|
[
"def",
"add_member",
"(",
"self",
",",
"address",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"'membership/%s'",
"%",
"self",
".",
"id",
"kwargs",
"[",
"\"address\"",
"]",
"=",
"address",
"if",
"\"group_name\"",
"not",
"in",
"kwargs",
"and",
"self",
".",
"default_name",
":",
"kwargs",
"[",
"\"group_name\"",
"]",
"=",
"self",
".",
"default_name",
"response_data",
"=",
"self",
".",
"api",
".",
"request",
"(",
"path",
",",
"kwargs",
")",
"if",
"'user_id'",
"in",
"response_data",
":",
"user_id",
"=",
"response_data",
"[",
"'user_id'",
"]",
"return",
"FiestaUser",
"(",
"user_id",
",",
"address",
"=",
"address",
",",
"groups",
"=",
"[",
"self",
"]",
")",
"return",
"None"
] |
Add a member to a group.
All Fiesta membership options can be passed in as keyword
arguments. Some valid options include:
- `group_name`: Since each member can access a group using
their own name, you can override the `group_name` in this
method. By default, the group will have the name specified
on the class level `default_name` property.
- `display_name` is the full name of the user that they will
see throughout the UI if this is a new account.
- `welcome_message` should be a dictionary specified according
to the docs. If you set it to ``False``, no message will be
sent. See
http://docs.fiesta.cc/list-management-api.html#message for
formatting details.
.. seealso:: `Fiesta API documentation <http://docs.fiesta.cc/list-management-api.html#adding-members>`_
|
[
"Add",
"a",
"member",
"to",
"a",
"group",
"."
] |
cfcc11e4ae4c76b1007794604c33dde877f62cfb
|
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L175-L208
|
238,531
|
fiesta/fiesta-python
|
fiesta/fiesta.py
|
FiestaGroup.send_message
|
def send_message(self, subject=None, text=None, markdown=None, message_dict=None):
"""
Helper function to send a message to a group
"""
message = FiestaMessage(self.api, self, subject, text, markdown, message_dict)
return message.send()
|
python
|
def send_message(self, subject=None, text=None, markdown=None, message_dict=None):
"""
Helper function to send a message to a group
"""
message = FiestaMessage(self.api, self, subject, text, markdown, message_dict)
return message.send()
|
[
"def",
"send_message",
"(",
"self",
",",
"subject",
"=",
"None",
",",
"text",
"=",
"None",
",",
"markdown",
"=",
"None",
",",
"message_dict",
"=",
"None",
")",
":",
"message",
"=",
"FiestaMessage",
"(",
"self",
".",
"api",
",",
"self",
",",
"subject",
",",
"text",
",",
"markdown",
",",
"message_dict",
")",
"return",
"message",
".",
"send",
"(",
")"
] |
Helper function to send a message to a group
|
[
"Helper",
"function",
"to",
"send",
"a",
"message",
"to",
"a",
"group"
] |
cfcc11e4ae4c76b1007794604c33dde877f62cfb
|
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L210-L215
|
238,532
|
fiesta/fiesta-python
|
fiesta/fiesta.py
|
FiestaGroup.add_application
|
def add_application(self, application_id, **kwargs):
"""
Add an application to a group.
`application_id` is the name of the application to add. Any
application options can be specified as kwargs.
"""
path = 'group/%s/application' % self.id
data = {'application_id': application_id}
if kwargs:
data["options"] = kwargs
self.api.request(path, data)
|
python
|
def add_application(self, application_id, **kwargs):
"""
Add an application to a group.
`application_id` is the name of the application to add. Any
application options can be specified as kwargs.
"""
path = 'group/%s/application' % self.id
data = {'application_id': application_id}
if kwargs:
data["options"] = kwargs
self.api.request(path, data)
|
[
"def",
"add_application",
"(",
"self",
",",
"application_id",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"'group/%s/application'",
"%",
"self",
".",
"id",
"data",
"=",
"{",
"'application_id'",
":",
"application_id",
"}",
"if",
"kwargs",
":",
"data",
"[",
"\"options\"",
"]",
"=",
"kwargs",
"self",
".",
"api",
".",
"request",
"(",
"path",
",",
"data",
")"
] |
Add an application to a group.
`application_id` is the name of the application to add. Any
application options can be specified as kwargs.
|
[
"Add",
"an",
"application",
"to",
"a",
"group",
"."
] |
cfcc11e4ae4c76b1007794604c33dde877f62cfb
|
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L217-L230
|
238,533
|
fiesta/fiesta-python
|
fiesta/fiesta.py
|
FiestaMessage.send
|
def send(self, group_id=None, message_dict=None):
"""
Send this current message to a group.
`message_dict` can be a dictionary formatted according to http://docs.fiesta.cc/list-management-api.html#messages
If message is provided, this method will ignore object-level variables.
"""
if self.group is not None and self.group.id is not None:
group_id = self.group.id
path = 'message/%s' % group_id
if message_dict is not None:
request_data = {
'message': message_dict,
}
else:
subject = self.subject
text = self.text
markdown = self.markdown
request_data = {
'message': {},
}
if subject:
request_data['message']['subject'] = subject
if text:
request_data['message']['text'] = text
if markdown:
request_data['message']['markdown'] = markdown
response_data = self.api.request(path, request_data)
self.id = response_data['message_id']
self.thread_id = response_data['thread_id']
self.sent_message = FiestaMessage(self.api, response_data['message'])
|
python
|
def send(self, group_id=None, message_dict=None):
"""
Send this current message to a group.
`message_dict` can be a dictionary formatted according to http://docs.fiesta.cc/list-management-api.html#messages
If message is provided, this method will ignore object-level variables.
"""
if self.group is not None and self.group.id is not None:
group_id = self.group.id
path = 'message/%s' % group_id
if message_dict is not None:
request_data = {
'message': message_dict,
}
else:
subject = self.subject
text = self.text
markdown = self.markdown
request_data = {
'message': {},
}
if subject:
request_data['message']['subject'] = subject
if text:
request_data['message']['text'] = text
if markdown:
request_data['message']['markdown'] = markdown
response_data = self.api.request(path, request_data)
self.id = response_data['message_id']
self.thread_id = response_data['thread_id']
self.sent_message = FiestaMessage(self.api, response_data['message'])
|
[
"def",
"send",
"(",
"self",
",",
"group_id",
"=",
"None",
",",
"message_dict",
"=",
"None",
")",
":",
"if",
"self",
".",
"group",
"is",
"not",
"None",
"and",
"self",
".",
"group",
".",
"id",
"is",
"not",
"None",
":",
"group_id",
"=",
"self",
".",
"group",
".",
"id",
"path",
"=",
"'message/%s'",
"%",
"group_id",
"if",
"message_dict",
"is",
"not",
"None",
":",
"request_data",
"=",
"{",
"'message'",
":",
"message_dict",
",",
"}",
"else",
":",
"subject",
"=",
"self",
".",
"subject",
"text",
"=",
"self",
".",
"text",
"markdown",
"=",
"self",
".",
"markdown",
"request_data",
"=",
"{",
"'message'",
":",
"{",
"}",
",",
"}",
"if",
"subject",
":",
"request_data",
"[",
"'message'",
"]",
"[",
"'subject'",
"]",
"=",
"subject",
"if",
"text",
":",
"request_data",
"[",
"'message'",
"]",
"[",
"'text'",
"]",
"=",
"text",
"if",
"markdown",
":",
"request_data",
"[",
"'message'",
"]",
"[",
"'markdown'",
"]",
"=",
"markdown",
"response_data",
"=",
"self",
".",
"api",
".",
"request",
"(",
"path",
",",
"request_data",
")",
"self",
".",
"id",
"=",
"response_data",
"[",
"'message_id'",
"]",
"self",
".",
"thread_id",
"=",
"response_data",
"[",
"'thread_id'",
"]",
"self",
".",
"sent_message",
"=",
"FiestaMessage",
"(",
"self",
".",
"api",
",",
"response_data",
"[",
"'message'",
"]",
")"
] |
Send this current message to a group.
`message_dict` can be a dictionary formatted according to http://docs.fiesta.cc/list-management-api.html#messages
If message is provided, this method will ignore object-level variables.
|
[
"Send",
"this",
"current",
"message",
"to",
"a",
"group",
"."
] |
cfcc11e4ae4c76b1007794604c33dde877f62cfb
|
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L296-L331
|
238,534
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
OptionSelector.setup_ui
|
def setup_ui(self, ):
"""Setup the ui
:returns: None
:rtype: None
:raises: None
"""
labels = self.reftrack.get_option_labels()
self.browser = ComboBoxBrowser(len(labels), headers=labels)
self.browser_vbox.addWidget(self.browser)
|
python
|
def setup_ui(self, ):
"""Setup the ui
:returns: None
:rtype: None
:raises: None
"""
labels = self.reftrack.get_option_labels()
self.browser = ComboBoxBrowser(len(labels), headers=labels)
self.browser_vbox.addWidget(self.browser)
|
[
"def",
"setup_ui",
"(",
"self",
",",
")",
":",
"labels",
"=",
"self",
".",
"reftrack",
".",
"get_option_labels",
"(",
")",
"self",
".",
"browser",
"=",
"ComboBoxBrowser",
"(",
"len",
"(",
"labels",
")",
",",
"headers",
"=",
"labels",
")",
"self",
".",
"browser_vbox",
".",
"addWidget",
"(",
"self",
".",
"browser",
")"
] |
Setup the ui
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"ui"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L39-L48
|
238,535
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
OptionSelector.select
|
def select(self, ):
"""Store the selected taskfileinfo self.selected and accept the dialog
:returns: None
:rtype: None
:raises: None
"""
s = self.browser.selected_indexes(self.browser.get_depth()-1)
if not s:
return
i = s[0].internalPointer()
if i:
tfi = i.internal_data()
self.selected = tfi
self.accept()
|
python
|
def select(self, ):
"""Store the selected taskfileinfo self.selected and accept the dialog
:returns: None
:rtype: None
:raises: None
"""
s = self.browser.selected_indexes(self.browser.get_depth()-1)
if not s:
return
i = s[0].internalPointer()
if i:
tfi = i.internal_data()
self.selected = tfi
self.accept()
|
[
"def",
"select",
"(",
"self",
",",
")",
":",
"s",
"=",
"self",
".",
"browser",
".",
"selected_indexes",
"(",
"self",
".",
"browser",
".",
"get_depth",
"(",
")",
"-",
"1",
")",
"if",
"not",
"s",
":",
"return",
"i",
"=",
"s",
"[",
"0",
"]",
".",
"internalPointer",
"(",
")",
"if",
"i",
":",
"tfi",
"=",
"i",
".",
"internal_data",
"(",
")",
"self",
".",
"selected",
"=",
"tfi",
"self",
".",
"accept",
"(",
")"
] |
Store the selected taskfileinfo self.selected and accept the dialog
:returns: None
:rtype: None
:raises: None
|
[
"Store",
"the",
"selected",
"taskfileinfo",
"self",
".",
"selected",
"and",
"accept",
"the",
"dialog"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L59-L73
|
238,536
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.setup_icons
|
def setup_icons(self, ):
"""Setup the icons of the ui
:returns: None
:rtype: None
:raises: None
"""
iconbtns = [("menu_border_24x24.png", self.menu_tb),
("duplicate_border_24x24.png", self.duplicate_tb),
("delete_border_24x24.png", self.delete_tb),
("reference_border_24x24.png", self.reference_tb),
("load_border_24x24.png", self.load_tb),
("unload_border_24x24.png", self.unload_tb),
("replace_border_24x24.png", self.replace_tb),
("import_border_24x24.png", self.importref_tb),
("import_border_24x24.png", self.importtf_tb),
("alien.png", self.alien_tb),
("imported.png", self.imported_tb)]
for iconname, btn in iconbtns:
i = get_icon(iconname, asicon=True)
btn.setIcon(i)
|
python
|
def setup_icons(self, ):
"""Setup the icons of the ui
:returns: None
:rtype: None
:raises: None
"""
iconbtns = [("menu_border_24x24.png", self.menu_tb),
("duplicate_border_24x24.png", self.duplicate_tb),
("delete_border_24x24.png", self.delete_tb),
("reference_border_24x24.png", self.reference_tb),
("load_border_24x24.png", self.load_tb),
("unload_border_24x24.png", self.unload_tb),
("replace_border_24x24.png", self.replace_tb),
("import_border_24x24.png", self.importref_tb),
("import_border_24x24.png", self.importtf_tb),
("alien.png", self.alien_tb),
("imported.png", self.imported_tb)]
for iconname, btn in iconbtns:
i = get_icon(iconname, asicon=True)
btn.setIcon(i)
|
[
"def",
"setup_icons",
"(",
"self",
",",
")",
":",
"iconbtns",
"=",
"[",
"(",
"\"menu_border_24x24.png\"",
",",
"self",
".",
"menu_tb",
")",
",",
"(",
"\"duplicate_border_24x24.png\"",
",",
"self",
".",
"duplicate_tb",
")",
",",
"(",
"\"delete_border_24x24.png\"",
",",
"self",
".",
"delete_tb",
")",
",",
"(",
"\"reference_border_24x24.png\"",
",",
"self",
".",
"reference_tb",
")",
",",
"(",
"\"load_border_24x24.png\"",
",",
"self",
".",
"load_tb",
")",
",",
"(",
"\"unload_border_24x24.png\"",
",",
"self",
".",
"unload_tb",
")",
",",
"(",
"\"replace_border_24x24.png\"",
",",
"self",
".",
"replace_tb",
")",
",",
"(",
"\"import_border_24x24.png\"",
",",
"self",
".",
"importref_tb",
")",
",",
"(",
"\"import_border_24x24.png\"",
",",
"self",
".",
"importtf_tb",
")",
",",
"(",
"\"alien.png\"",
",",
"self",
".",
"alien_tb",
")",
",",
"(",
"\"imported.png\"",
",",
"self",
".",
"imported_tb",
")",
"]",
"for",
"iconname",
",",
"btn",
"in",
"iconbtns",
":",
"i",
"=",
"get_icon",
"(",
"iconname",
",",
"asicon",
"=",
"True",
")",
"btn",
".",
"setIcon",
"(",
"i",
")"
] |
Setup the icons of the ui
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"icons",
"of",
"the",
"ui"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L104-L124
|
238,537
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.set_maintext
|
def set_maintext(self, index):
"""Set the maintext_lb to display text information about the given reftrack
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.DisplayRole
text = ""
model = index.model()
for i in (1, 2, 3, 5, 6):
new = model.index(index.row(), i, index.parent()).data(dr)
if new is not None:
text = " | ".join((text, new)) if text else new
self.maintext_lb.setText(text)
|
python
|
def set_maintext(self, index):
"""Set the maintext_lb to display text information about the given reftrack
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.DisplayRole
text = ""
model = index.model()
for i in (1, 2, 3, 5, 6):
new = model.index(index.row(), i, index.parent()).data(dr)
if new is not None:
text = " | ".join((text, new)) if text else new
self.maintext_lb.setText(text)
|
[
"def",
"set_maintext",
"(",
"self",
",",
"index",
")",
":",
"dr",
"=",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
"text",
"=",
"\"\"",
"model",
"=",
"index",
".",
"model",
"(",
")",
"for",
"i",
"in",
"(",
"1",
",",
"2",
",",
"3",
",",
"5",
",",
"6",
")",
":",
"new",
"=",
"model",
".",
"index",
"(",
"index",
".",
"row",
"(",
")",
",",
"i",
",",
"index",
".",
"parent",
"(",
")",
")",
".",
"data",
"(",
"dr",
")",
"if",
"new",
"is",
"not",
"None",
":",
"text",
"=",
"\" | \"",
".",
"join",
"(",
"(",
"text",
",",
"new",
")",
")",
"if",
"text",
"else",
"new",
"self",
".",
"maintext_lb",
".",
"setText",
"(",
"text",
")"
] |
Set the maintext_lb to display text information about the given reftrack
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"maintext_lb",
"to",
"display",
"text",
"information",
"about",
"the",
"given",
"reftrack"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L164-L181
|
238,538
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.set_identifiertext
|
def set_identifiertext(self, index):
"""Set the identifier text on the identifier_lb
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.DisplayRole
t = index.model().index(index.row(), 17, index.parent()).data(dr)
if t is None:
t = -1
else:
t = t+1
self.identifier_lb.setText("#%s" % t)
|
python
|
def set_identifiertext(self, index):
"""Set the identifier text on the identifier_lb
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.DisplayRole
t = index.model().index(index.row(), 17, index.parent()).data(dr)
if t is None:
t = -1
else:
t = t+1
self.identifier_lb.setText("#%s" % t)
|
[
"def",
"set_identifiertext",
"(",
"self",
",",
"index",
")",
":",
"dr",
"=",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
"t",
"=",
"index",
".",
"model",
"(",
")",
".",
"index",
"(",
"index",
".",
"row",
"(",
")",
",",
"17",
",",
"index",
".",
"parent",
"(",
")",
")",
".",
"data",
"(",
"dr",
")",
"if",
"t",
"is",
"None",
":",
"t",
"=",
"-",
"1",
"else",
":",
"t",
"=",
"t",
"+",
"1",
"self",
".",
"identifier_lb",
".",
"setText",
"(",
"\"#%s\"",
"%",
"t",
")"
] |
Set the identifier text on the identifier_lb
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"identifier",
"text",
"on",
"the",
"identifier_lb"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L183-L198
|
238,539
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.set_type_icon
|
def set_type_icon(self, index):
"""Set the type icon on type_icon_lb
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
icon = index.model().index(index.row(), 0, index.parent()).data(QtCore.Qt.DecorationRole)
if icon:
pix = icon.pixmap(self.type_icon_lb.size())
self.type_icon_lb.setPixmap(pix)
else:
self.type_icon_lb.setPixmap(None)
|
python
|
def set_type_icon(self, index):
"""Set the type icon on type_icon_lb
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
icon = index.model().index(index.row(), 0, index.parent()).data(QtCore.Qt.DecorationRole)
if icon:
pix = icon.pixmap(self.type_icon_lb.size())
self.type_icon_lb.setPixmap(pix)
else:
self.type_icon_lb.setPixmap(None)
|
[
"def",
"set_type_icon",
"(",
"self",
",",
"index",
")",
":",
"icon",
"=",
"index",
".",
"model",
"(",
")",
".",
"index",
"(",
"index",
".",
"row",
"(",
")",
",",
"0",
",",
"index",
".",
"parent",
"(",
")",
")",
".",
"data",
"(",
"QtCore",
".",
"Qt",
".",
"DecorationRole",
")",
"if",
"icon",
":",
"pix",
"=",
"icon",
".",
"pixmap",
"(",
"self",
".",
"type_icon_lb",
".",
"size",
"(",
")",
")",
"self",
".",
"type_icon_lb",
".",
"setPixmap",
"(",
"pix",
")",
"else",
":",
"self",
".",
"type_icon_lb",
".",
"setPixmap",
"(",
"None",
")"
] |
Set the type icon on type_icon_lb
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"type",
"icon",
"on",
"type_icon_lb"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L200-L214
|
238,540
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.disable_restricted
|
def disable_restricted(self, ):
"""Disable the restricted buttons
:returns: None
:rtype: None
:raises: None
"""
todisable = [(self.reftrack.duplicate, self.duplicate_tb),
(self.reftrack.delete, self.delete_tb),
(self.reftrack.reference, self.reference_tb),
(self.reftrack.replace, self.replace_tb),]
for action, btn in todisable:
res = self.reftrack.is_restricted(action)
btn.setDisabled(res)
|
python
|
def disable_restricted(self, ):
"""Disable the restricted buttons
:returns: None
:rtype: None
:raises: None
"""
todisable = [(self.reftrack.duplicate, self.duplicate_tb),
(self.reftrack.delete, self.delete_tb),
(self.reftrack.reference, self.reference_tb),
(self.reftrack.replace, self.replace_tb),]
for action, btn in todisable:
res = self.reftrack.is_restricted(action)
btn.setDisabled(res)
|
[
"def",
"disable_restricted",
"(",
"self",
",",
")",
":",
"todisable",
"=",
"[",
"(",
"self",
".",
"reftrack",
".",
"duplicate",
",",
"self",
".",
"duplicate_tb",
")",
",",
"(",
"self",
".",
"reftrack",
".",
"delete",
",",
"self",
".",
"delete_tb",
")",
",",
"(",
"self",
".",
"reftrack",
".",
"reference",
",",
"self",
".",
"reference_tb",
")",
",",
"(",
"self",
".",
"reftrack",
".",
"replace",
",",
"self",
".",
"replace_tb",
")",
",",
"]",
"for",
"action",
",",
"btn",
"in",
"todisable",
":",
"res",
"=",
"self",
".",
"reftrack",
".",
"is_restricted",
"(",
"action",
")",
"btn",
".",
"setDisabled",
"(",
"res",
")"
] |
Disable the restricted buttons
:returns: None
:rtype: None
:raises: None
|
[
"Disable",
"the",
"restricted",
"buttons"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L216-L229
|
238,541
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.hide_restricted
|
def hide_restricted(self, ):
"""Hide the restricted buttons
:returns: None
:rtype: None
:raises: None
"""
tohide = [((self.reftrack.unload, self.unload_tb),
(self.reftrack.load, self.load_tb)),
((self.reftrack.import_file, self.importtf_tb),
(self.reftrack.import_reference, self.importref_tb))]
for (action1, btn1), (action2, btn2) in tohide:
res1 = self.reftrack.is_restricted(action1)
res2 = self.reftrack.is_restricted(action2)
if res1 != res2:
btn1.setEnabled(True)
btn1.setHidden(res1)
btn2.setHidden(res2)
else: # both are restricted, then show one but disable it
btn1.setDisabled(True)
btn1.setVisible(True)
btn2.setVisible(False)
|
python
|
def hide_restricted(self, ):
"""Hide the restricted buttons
:returns: None
:rtype: None
:raises: None
"""
tohide = [((self.reftrack.unload, self.unload_tb),
(self.reftrack.load, self.load_tb)),
((self.reftrack.import_file, self.importtf_tb),
(self.reftrack.import_reference, self.importref_tb))]
for (action1, btn1), (action2, btn2) in tohide:
res1 = self.reftrack.is_restricted(action1)
res2 = self.reftrack.is_restricted(action2)
if res1 != res2:
btn1.setEnabled(True)
btn1.setHidden(res1)
btn2.setHidden(res2)
else: # both are restricted, then show one but disable it
btn1.setDisabled(True)
btn1.setVisible(True)
btn2.setVisible(False)
|
[
"def",
"hide_restricted",
"(",
"self",
",",
")",
":",
"tohide",
"=",
"[",
"(",
"(",
"self",
".",
"reftrack",
".",
"unload",
",",
"self",
".",
"unload_tb",
")",
",",
"(",
"self",
".",
"reftrack",
".",
"load",
",",
"self",
".",
"load_tb",
")",
")",
",",
"(",
"(",
"self",
".",
"reftrack",
".",
"import_file",
",",
"self",
".",
"importtf_tb",
")",
",",
"(",
"self",
".",
"reftrack",
".",
"import_reference",
",",
"self",
".",
"importref_tb",
")",
")",
"]",
"for",
"(",
"action1",
",",
"btn1",
")",
",",
"(",
"action2",
",",
"btn2",
")",
"in",
"tohide",
":",
"res1",
"=",
"self",
".",
"reftrack",
".",
"is_restricted",
"(",
"action1",
")",
"res2",
"=",
"self",
".",
"reftrack",
".",
"is_restricted",
"(",
"action2",
")",
"if",
"res1",
"!=",
"res2",
":",
"btn1",
".",
"setEnabled",
"(",
"True",
")",
"btn1",
".",
"setHidden",
"(",
"res1",
")",
"btn2",
".",
"setHidden",
"(",
"res2",
")",
"else",
":",
"# both are restricted, then show one but disable it",
"btn1",
".",
"setDisabled",
"(",
"True",
")",
"btn1",
".",
"setVisible",
"(",
"True",
")",
"btn2",
".",
"setVisible",
"(",
"False",
")"
] |
Hide the restricted buttons
:returns: None
:rtype: None
:raises: None
|
[
"Hide",
"the",
"restricted",
"buttons"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L231-L252
|
238,542
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.set_top_bar_color
|
def set_top_bar_color(self, index):
"""Set the color of the upper frame to the background color of the reftrack status
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.ForegroundRole
c = index.model().index(index.row(), 8, index.parent()).data(dr)
if not c:
c = self.upper_fr_default_bg_color
self.upper_fr.setStyleSheet('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue()))
|
python
|
def set_top_bar_color(self, index):
"""Set the color of the upper frame to the background color of the reftrack status
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.ForegroundRole
c = index.model().index(index.row(), 8, index.parent()).data(dr)
if not c:
c = self.upper_fr_default_bg_color
self.upper_fr.setStyleSheet('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue()))
|
[
"def",
"set_top_bar_color",
"(",
"self",
",",
"index",
")",
":",
"dr",
"=",
"QtCore",
".",
"Qt",
".",
"ForegroundRole",
"c",
"=",
"index",
".",
"model",
"(",
")",
".",
"index",
"(",
"index",
".",
"row",
"(",
")",
",",
"8",
",",
"index",
".",
"parent",
"(",
")",
")",
".",
"data",
"(",
"dr",
")",
"if",
"not",
"c",
":",
"c",
"=",
"self",
".",
"upper_fr_default_bg_color",
"self",
".",
"upper_fr",
".",
"setStyleSheet",
"(",
"'background-color: rgb(%s, %s, %s)'",
"%",
"(",
"c",
".",
"red",
"(",
")",
",",
"c",
".",
"green",
"(",
")",
",",
"c",
".",
"blue",
"(",
")",
")",
")"
] |
Set the color of the upper frame to the background color of the reftrack status
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"color",
"of",
"the",
"upper",
"frame",
"to",
"the",
"background",
"color",
"of",
"the",
"reftrack",
"status"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L254-L267
|
238,543
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.set_menu
|
def set_menu(self, ):
"""Setup the menu that the menu_tb button uses
:returns: None
:rtype: None
:raises: None
"""
self.menu = QtGui.QMenu(self)
actions = self.reftrack.get_additional_actions()
self.actions = []
for a in actions:
if a.icon:
qaction = QtGui.QAction(a.icon, a.name, self)
else:
qaction = QtGui.QAction(a.name, self)
qaction.setCheckable(a.checkable)
qaction.setChecked(a.checked)
qaction.setEnabled(a.enabled)
qaction.triggered.connect(a.action)
self.actions.append(qaction)
self.menu.addAction(qaction)
self.menu_tb.setMenu(self.menu)
|
python
|
def set_menu(self, ):
"""Setup the menu that the menu_tb button uses
:returns: None
:rtype: None
:raises: None
"""
self.menu = QtGui.QMenu(self)
actions = self.reftrack.get_additional_actions()
self.actions = []
for a in actions:
if a.icon:
qaction = QtGui.QAction(a.icon, a.name, self)
else:
qaction = QtGui.QAction(a.name, self)
qaction.setCheckable(a.checkable)
qaction.setChecked(a.checked)
qaction.setEnabled(a.enabled)
qaction.triggered.connect(a.action)
self.actions.append(qaction)
self.menu.addAction(qaction)
self.menu_tb.setMenu(self.menu)
|
[
"def",
"set_menu",
"(",
"self",
",",
")",
":",
"self",
".",
"menu",
"=",
"QtGui",
".",
"QMenu",
"(",
"self",
")",
"actions",
"=",
"self",
".",
"reftrack",
".",
"get_additional_actions",
"(",
")",
"self",
".",
"actions",
"=",
"[",
"]",
"for",
"a",
"in",
"actions",
":",
"if",
"a",
".",
"icon",
":",
"qaction",
"=",
"QtGui",
".",
"QAction",
"(",
"a",
".",
"icon",
",",
"a",
".",
"name",
",",
"self",
")",
"else",
":",
"qaction",
"=",
"QtGui",
".",
"QAction",
"(",
"a",
".",
"name",
",",
"self",
")",
"qaction",
".",
"setCheckable",
"(",
"a",
".",
"checkable",
")",
"qaction",
".",
"setChecked",
"(",
"a",
".",
"checked",
")",
"qaction",
".",
"setEnabled",
"(",
"a",
".",
"enabled",
")",
"qaction",
".",
"triggered",
".",
"connect",
"(",
"a",
".",
"action",
")",
"self",
".",
"actions",
".",
"append",
"(",
"qaction",
")",
"self",
".",
"menu",
".",
"addAction",
"(",
"qaction",
")",
"self",
".",
"menu_tb",
".",
"setMenu",
"(",
"self",
".",
"menu",
")"
] |
Setup the menu that the menu_tb button uses
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"menu",
"that",
"the",
"menu_tb",
"button",
"uses"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L301-L322
|
238,544
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.get_taskfileinfo_selection
|
def get_taskfileinfo_selection(self, ):
"""Return a taskfileinfo that the user chose from the available options
:returns: the chosen taskfileinfo
:rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
:raises: None
"""
sel = OptionSelector(self.reftrack)
sel.exec_()
return sel.selected
|
python
|
def get_taskfileinfo_selection(self, ):
"""Return a taskfileinfo that the user chose from the available options
:returns: the chosen taskfileinfo
:rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
:raises: None
"""
sel = OptionSelector(self.reftrack)
sel.exec_()
return sel.selected
|
[
"def",
"get_taskfileinfo_selection",
"(",
"self",
",",
")",
":",
"sel",
"=",
"OptionSelector",
"(",
"self",
".",
"reftrack",
")",
"sel",
".",
"exec_",
"(",
")",
"return",
"sel",
".",
"selected"
] |
Return a taskfileinfo that the user chose from the available options
:returns: the chosen taskfileinfo
:rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
:raises: None
|
[
"Return",
"a",
"taskfileinfo",
"that",
"the",
"user",
"chose",
"from",
"the",
"available",
"options"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L324-L333
|
238,545
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.reference
|
def reference(self, ):
"""Reference a file
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.reference(tfi)
|
python
|
def reference(self, ):
"""Reference a file
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.reference(tfi)
|
[
"def",
"reference",
"(",
"self",
",",
")",
":",
"tfi",
"=",
"self",
".",
"get_taskfileinfo_selection",
"(",
")",
"if",
"tfi",
":",
"self",
".",
"reftrack",
".",
"reference",
"(",
"tfi",
")"
] |
Reference a file
:returns: None
:rtype: None
:raises: None
|
[
"Reference",
"a",
"file"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L371-L380
|
238,546
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.import_file
|
def import_file(self, ):
"""Import a file
:returns: None
:rtype: None
:raises: NotImplementedError
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.import_file(tfi)
|
python
|
def import_file(self, ):
"""Import a file
:returns: None
:rtype: None
:raises: NotImplementedError
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.import_file(tfi)
|
[
"def",
"import_file",
"(",
"self",
",",
")",
":",
"tfi",
"=",
"self",
".",
"get_taskfileinfo_selection",
"(",
")",
"if",
"tfi",
":",
"self",
".",
"reftrack",
".",
"import_file",
"(",
"tfi",
")"
] |
Import a file
:returns: None
:rtype: None
:raises: NotImplementedError
|
[
"Import",
"a",
"file"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L382-L391
|
238,547
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/reftrackwidget.py
|
ReftrackWidget.replace
|
def replace(self, ):
"""Replace the current reftrack
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.replace(tfi)
|
python
|
def replace(self, ):
"""Replace the current reftrack
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.replace(tfi)
|
[
"def",
"replace",
"(",
"self",
",",
")",
":",
"tfi",
"=",
"self",
".",
"get_taskfileinfo_selection",
"(",
")",
"if",
"tfi",
":",
"self",
".",
"reftrack",
".",
"replace",
"(",
"tfi",
")"
] |
Replace the current reftrack
:returns: None
:rtype: None
:raises: None
|
[
"Replace",
"the",
"current",
"reftrack"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L402-L411
|
238,548
|
rouk1/django-image-renderer
|
renderer/models.py
|
delete_renditions_if_master_has_changed
|
def delete_renditions_if_master_has_changed(sender, instance, **kwargs):
'''if master file as changed delete all renditions'''
try:
obj = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass # Object is new, so field hasn't technically changed.
else:
if not obj.master == instance.master: # Field has changed
obj.master.delete(save=False)
instance.delete_all_renditions()
|
python
|
def delete_renditions_if_master_has_changed(sender, instance, **kwargs):
'''if master file as changed delete all renditions'''
try:
obj = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass # Object is new, so field hasn't technically changed.
else:
if not obj.master == instance.master: # Field has changed
obj.master.delete(save=False)
instance.delete_all_renditions()
|
[
"def",
"delete_renditions_if_master_has_changed",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"obj",
"=",
"sender",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"instance",
".",
"pk",
")",
"except",
"sender",
".",
"DoesNotExist",
":",
"pass",
"# Object is new, so field hasn't technically changed.",
"else",
":",
"if",
"not",
"obj",
".",
"master",
"==",
"instance",
".",
"master",
":",
"# Field has changed",
"obj",
".",
"master",
".",
"delete",
"(",
"save",
"=",
"False",
")",
"instance",
".",
"delete_all_renditions",
"(",
")"
] |
if master file as changed delete all renditions
|
[
"if",
"master",
"file",
"as",
"changed",
"delete",
"all",
"renditions"
] |
6a4326b77709601e18ee04f5626cf475c5ea0bb5
|
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L157-L166
|
238,549
|
rouk1/django-image-renderer
|
renderer/models.py
|
photo_post_delete_handler
|
def photo_post_delete_handler(sender, **kwargs):
'''delete image when rows is gone from database'''
instance = kwargs.get('instance')
instance.master.delete(save=False)
instance.delete_all_renditions()
|
python
|
def photo_post_delete_handler(sender, **kwargs):
'''delete image when rows is gone from database'''
instance = kwargs.get('instance')
instance.master.delete(save=False)
instance.delete_all_renditions()
|
[
"def",
"photo_post_delete_handler",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"instance",
"=",
"kwargs",
".",
"get",
"(",
"'instance'",
")",
"instance",
".",
"master",
".",
"delete",
"(",
"save",
"=",
"False",
")",
"instance",
".",
"delete_all_renditions",
"(",
")"
] |
delete image when rows is gone from database
|
[
"delete",
"image",
"when",
"rows",
"is",
"gone",
"from",
"database"
] |
6a4326b77709601e18ee04f5626cf475c5ea0bb5
|
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L170-L174
|
238,550
|
rouk1/django-image-renderer
|
renderer/models.py
|
MasterImage.get_rendition_size
|
def get_rendition_size(self, width=0, height=0):
'''returns real rendition URL'''
if width == 0 and height == 0:
return (self.master_width, self.master_height)
target_width = int(width)
target_height = int(height)
ratio = self.master_width / float(self.master_height)
if target_height == 0 and target_width != 0:
target_height = int(target_width / ratio)
if target_height != 0 and target_width == 0:
target_width = int(target_height * ratio)
return target_width, target_height
|
python
|
def get_rendition_size(self, width=0, height=0):
'''returns real rendition URL'''
if width == 0 and height == 0:
return (self.master_width, self.master_height)
target_width = int(width)
target_height = int(height)
ratio = self.master_width / float(self.master_height)
if target_height == 0 and target_width != 0:
target_height = int(target_width / ratio)
if target_height != 0 and target_width == 0:
target_width = int(target_height * ratio)
return target_width, target_height
|
[
"def",
"get_rendition_size",
"(",
"self",
",",
"width",
"=",
"0",
",",
"height",
"=",
"0",
")",
":",
"if",
"width",
"==",
"0",
"and",
"height",
"==",
"0",
":",
"return",
"(",
"self",
".",
"master_width",
",",
"self",
".",
"master_height",
")",
"target_width",
"=",
"int",
"(",
"width",
")",
"target_height",
"=",
"int",
"(",
"height",
")",
"ratio",
"=",
"self",
".",
"master_width",
"/",
"float",
"(",
"self",
".",
"master_height",
")",
"if",
"target_height",
"==",
"0",
"and",
"target_width",
"!=",
"0",
":",
"target_height",
"=",
"int",
"(",
"target_width",
"/",
"ratio",
")",
"if",
"target_height",
"!=",
"0",
"and",
"target_width",
"==",
"0",
":",
"target_width",
"=",
"int",
"(",
"target_height",
"*",
"ratio",
")",
"return",
"target_width",
",",
"target_height"
] |
returns real rendition URL
|
[
"returns",
"real",
"rendition",
"URL"
] |
6a4326b77709601e18ee04f5626cf475c5ea0bb5
|
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L49-L64
|
238,551
|
rouk1/django-image-renderer
|
renderer/models.py
|
MasterImage.get_rendition_url
|
def get_rendition_url(self, width=0, height=0):
'''get the rendition URL for a specified size
if the renditions does not exists it will be created
'''
if width == 0 and height == 0:
return self.get_master_url()
target_width, target_height = self.get_rendition_size(width, height)
key = '%sx%s' % (target_width, target_height)
if not self.renditions:
self.renditions = {}
rendition_name = self.renditions.get(key, False)
if not rendition_name:
rendition_name = self.make_rendition(target_width, target_height)
return default_storage.url(rendition_name)
|
python
|
def get_rendition_url(self, width=0, height=0):
'''get the rendition URL for a specified size
if the renditions does not exists it will be created
'''
if width == 0 and height == 0:
return self.get_master_url()
target_width, target_height = self.get_rendition_size(width, height)
key = '%sx%s' % (target_width, target_height)
if not self.renditions:
self.renditions = {}
rendition_name = self.renditions.get(key, False)
if not rendition_name:
rendition_name = self.make_rendition(target_width, target_height)
return default_storage.url(rendition_name)
|
[
"def",
"get_rendition_url",
"(",
"self",
",",
"width",
"=",
"0",
",",
"height",
"=",
"0",
")",
":",
"if",
"width",
"==",
"0",
"and",
"height",
"==",
"0",
":",
"return",
"self",
".",
"get_master_url",
"(",
")",
"target_width",
",",
"target_height",
"=",
"self",
".",
"get_rendition_size",
"(",
"width",
",",
"height",
")",
"key",
"=",
"'%sx%s'",
"%",
"(",
"target_width",
",",
"target_height",
")",
"if",
"not",
"self",
".",
"renditions",
":",
"self",
".",
"renditions",
"=",
"{",
"}",
"rendition_name",
"=",
"self",
".",
"renditions",
".",
"get",
"(",
"key",
",",
"False",
")",
"if",
"not",
"rendition_name",
":",
"rendition_name",
"=",
"self",
".",
"make_rendition",
"(",
"target_width",
",",
"target_height",
")",
"return",
"default_storage",
".",
"url",
"(",
"rendition_name",
")"
] |
get the rendition URL for a specified size
if the renditions does not exists it will be created
|
[
"get",
"the",
"rendition",
"URL",
"for",
"a",
"specified",
"size"
] |
6a4326b77709601e18ee04f5626cf475c5ea0bb5
|
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L66-L82
|
238,552
|
rouk1/django-image-renderer
|
renderer/models.py
|
MasterImage.delete_all_renditions
|
def delete_all_renditions(self):
'''delete all renditions and rendition dict'''
if self.renditions:
for r in self.renditions.values():
default_storage.delete(r)
self.renditions = {}
|
python
|
def delete_all_renditions(self):
'''delete all renditions and rendition dict'''
if self.renditions:
for r in self.renditions.values():
default_storage.delete(r)
self.renditions = {}
|
[
"def",
"delete_all_renditions",
"(",
"self",
")",
":",
"if",
"self",
".",
"renditions",
":",
"for",
"r",
"in",
"self",
".",
"renditions",
".",
"values",
"(",
")",
":",
"default_storage",
".",
"delete",
"(",
"r",
")",
"self",
".",
"renditions",
"=",
"{",
"}"
] |
delete all renditions and rendition dict
|
[
"delete",
"all",
"renditions",
"and",
"rendition",
"dict"
] |
6a4326b77709601e18ee04f5626cf475c5ea0bb5
|
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L88-L93
|
238,553
|
rouk1/django-image-renderer
|
renderer/models.py
|
MasterImage.make_rendition
|
def make_rendition(self, width, height):
'''build a rendition
0 x 0 -> will give master URL
only width -> will make a renditions with master's aspect ratio
width x height -> will make an image potentialy cropped
'''
image = Image.open(self.master)
format = image.format
target_w = float(width)
target_h = float(height)
if (target_w == 0):
target_w = self.master_width
if (target_h == 0):
target_h = self.master_height
rendition_key = '%dx%d' % (target_w, target_h)
if rendition_key in self.renditions:
return self.renditions[rendition_key]
if (target_w != self.master_width or target_h != self.master_height):
r = target_w / target_h
R = float(self.master_width) / self.master_height
if r != R:
if r > R:
crop_w = self.master_width
crop_h = crop_w / r
x = 0
y = int(self.master_height - crop_h) >> 1
else:
crop_h = self.master_height
crop_w = crop_h * r
x = int(self.master_width - crop_w) >> 1
y = 0
image = image.crop((x, y, int(crop_w + x), int(crop_h + y)))
image.thumbnail((int(target_w), int(target_h)), Image.ANTIALIAS)
filename, ext = os.path.splitext(self.get_master_filename())
rendition_name = '%s/%s_%s%s' % (
IMAGE_DIRECTORY,
filename,
rendition_key,
ext
)
fd = BytesIO()
image.save(fd, format)
default_storage.save(rendition_name, fd)
self.renditions[rendition_key] = rendition_name
self.save()
return rendition_name
return self.master.name
|
python
|
def make_rendition(self, width, height):
'''build a rendition
0 x 0 -> will give master URL
only width -> will make a renditions with master's aspect ratio
width x height -> will make an image potentialy cropped
'''
image = Image.open(self.master)
format = image.format
target_w = float(width)
target_h = float(height)
if (target_w == 0):
target_w = self.master_width
if (target_h == 0):
target_h = self.master_height
rendition_key = '%dx%d' % (target_w, target_h)
if rendition_key in self.renditions:
return self.renditions[rendition_key]
if (target_w != self.master_width or target_h != self.master_height):
r = target_w / target_h
R = float(self.master_width) / self.master_height
if r != R:
if r > R:
crop_w = self.master_width
crop_h = crop_w / r
x = 0
y = int(self.master_height - crop_h) >> 1
else:
crop_h = self.master_height
crop_w = crop_h * r
x = int(self.master_width - crop_w) >> 1
y = 0
image = image.crop((x, y, int(crop_w + x), int(crop_h + y)))
image.thumbnail((int(target_w), int(target_h)), Image.ANTIALIAS)
filename, ext = os.path.splitext(self.get_master_filename())
rendition_name = '%s/%s_%s%s' % (
IMAGE_DIRECTORY,
filename,
rendition_key,
ext
)
fd = BytesIO()
image.save(fd, format)
default_storage.save(rendition_name, fd)
self.renditions[rendition_key] = rendition_name
self.save()
return rendition_name
return self.master.name
|
[
"def",
"make_rendition",
"(",
"self",
",",
"width",
",",
"height",
")",
":",
"image",
"=",
"Image",
".",
"open",
"(",
"self",
".",
"master",
")",
"format",
"=",
"image",
".",
"format",
"target_w",
"=",
"float",
"(",
"width",
")",
"target_h",
"=",
"float",
"(",
"height",
")",
"if",
"(",
"target_w",
"==",
"0",
")",
":",
"target_w",
"=",
"self",
".",
"master_width",
"if",
"(",
"target_h",
"==",
"0",
")",
":",
"target_h",
"=",
"self",
".",
"master_height",
"rendition_key",
"=",
"'%dx%d'",
"%",
"(",
"target_w",
",",
"target_h",
")",
"if",
"rendition_key",
"in",
"self",
".",
"renditions",
":",
"return",
"self",
".",
"renditions",
"[",
"rendition_key",
"]",
"if",
"(",
"target_w",
"!=",
"self",
".",
"master_width",
"or",
"target_h",
"!=",
"self",
".",
"master_height",
")",
":",
"r",
"=",
"target_w",
"/",
"target_h",
"R",
"=",
"float",
"(",
"self",
".",
"master_width",
")",
"/",
"self",
".",
"master_height",
"if",
"r",
"!=",
"R",
":",
"if",
"r",
">",
"R",
":",
"crop_w",
"=",
"self",
".",
"master_width",
"crop_h",
"=",
"crop_w",
"/",
"r",
"x",
"=",
"0",
"y",
"=",
"int",
"(",
"self",
".",
"master_height",
"-",
"crop_h",
")",
">>",
"1",
"else",
":",
"crop_h",
"=",
"self",
".",
"master_height",
"crop_w",
"=",
"crop_h",
"*",
"r",
"x",
"=",
"int",
"(",
"self",
".",
"master_width",
"-",
"crop_w",
")",
">>",
"1",
"y",
"=",
"0",
"image",
"=",
"image",
".",
"crop",
"(",
"(",
"x",
",",
"y",
",",
"int",
"(",
"crop_w",
"+",
"x",
")",
",",
"int",
"(",
"crop_h",
"+",
"y",
")",
")",
")",
"image",
".",
"thumbnail",
"(",
"(",
"int",
"(",
"target_w",
")",
",",
"int",
"(",
"target_h",
")",
")",
",",
"Image",
".",
"ANTIALIAS",
")",
"filename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"get_master_filename",
"(",
")",
")",
"rendition_name",
"=",
"'%s/%s_%s%s'",
"%",
"(",
"IMAGE_DIRECTORY",
",",
"filename",
",",
"rendition_key",
",",
"ext",
")",
"fd",
"=",
"BytesIO",
"(",
")",
"image",
".",
"save",
"(",
"fd",
",",
"format",
")",
"default_storage",
".",
"save",
"(",
"rendition_name",
",",
"fd",
")",
"self",
".",
"renditions",
"[",
"rendition_key",
"]",
"=",
"rendition_name",
"self",
".",
"save",
"(",
")",
"return",
"rendition_name",
"return",
"self",
".",
"master",
".",
"name"
] |
build a rendition
0 x 0 -> will give master URL
only width -> will make a renditions with master's aspect ratio
width x height -> will make an image potentialy cropped
|
[
"build",
"a",
"rendition"
] |
6a4326b77709601e18ee04f5626cf475c5ea0bb5
|
https://github.com/rouk1/django-image-renderer/blob/6a4326b77709601e18ee04f5626cf475c5ea0bb5/renderer/models.py#L95-L153
|
238,554
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
execute_actioncollection
|
def execute_actioncollection(obj, actioncollection, confirm=True):
"""Execute the given actioncollection with the given object
:param obj: the object to be processed
:param actioncollection:
:type actioncollection: :class:`ActionCollection`
:param confirm: If True, ask the user to continue, if actions failed.
:type confirm: :class:`bool`
:returns: An action status. If the execution fails but the user confirms, the status will be successful.
:rtype: :class:`ActionStatus`
:raises: None
"""
actioncollection.execute(obj)
status = actioncollection.status()
if status.value == ActionStatus.SUCCESS or not confirm:
return status
ard = ActionReportDialog(actioncollection)
confirmed = ard.exec_()
if confirmed:
msg = "User confirmed to continue although the status was: %s" % status.message,
s = ActionStatus.SUCCESS
tb = status.traceback
else:
s = status.value
msg = "User aborted the actions because the status was: %s" % status.message,
tb = status.traceback
return ActionStatus(s, msg, tb)
|
python
|
def execute_actioncollection(obj, actioncollection, confirm=True):
"""Execute the given actioncollection with the given object
:param obj: the object to be processed
:param actioncollection:
:type actioncollection: :class:`ActionCollection`
:param confirm: If True, ask the user to continue, if actions failed.
:type confirm: :class:`bool`
:returns: An action status. If the execution fails but the user confirms, the status will be successful.
:rtype: :class:`ActionStatus`
:raises: None
"""
actioncollection.execute(obj)
status = actioncollection.status()
if status.value == ActionStatus.SUCCESS or not confirm:
return status
ard = ActionReportDialog(actioncollection)
confirmed = ard.exec_()
if confirmed:
msg = "User confirmed to continue although the status was: %s" % status.message,
s = ActionStatus.SUCCESS
tb = status.traceback
else:
s = status.value
msg = "User aborted the actions because the status was: %s" % status.message,
tb = status.traceback
return ActionStatus(s, msg, tb)
|
[
"def",
"execute_actioncollection",
"(",
"obj",
",",
"actioncollection",
",",
"confirm",
"=",
"True",
")",
":",
"actioncollection",
".",
"execute",
"(",
"obj",
")",
"status",
"=",
"actioncollection",
".",
"status",
"(",
")",
"if",
"status",
".",
"value",
"==",
"ActionStatus",
".",
"SUCCESS",
"or",
"not",
"confirm",
":",
"return",
"status",
"ard",
"=",
"ActionReportDialog",
"(",
"actioncollection",
")",
"confirmed",
"=",
"ard",
".",
"exec_",
"(",
")",
"if",
"confirmed",
":",
"msg",
"=",
"\"User confirmed to continue although the status was: %s\"",
"%",
"status",
".",
"message",
",",
"s",
"=",
"ActionStatus",
".",
"SUCCESS",
"tb",
"=",
"status",
".",
"traceback",
"else",
":",
"s",
"=",
"status",
".",
"value",
"msg",
"=",
"\"User aborted the actions because the status was: %s\"",
"%",
"status",
".",
"message",
",",
"tb",
"=",
"status",
".",
"traceback",
"return",
"ActionStatus",
"(",
"s",
",",
"msg",
",",
"tb",
")"
] |
Execute the given actioncollection with the given object
:param obj: the object to be processed
:param actioncollection:
:type actioncollection: :class:`ActionCollection`
:param confirm: If True, ask the user to continue, if actions failed.
:type confirm: :class:`bool`
:returns: An action status. If the execution fails but the user confirms, the status will be successful.
:rtype: :class:`ActionStatus`
:raises: None
|
[
"Execute",
"the",
"given",
"actioncollection",
"with",
"the",
"given",
"object"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L281-L307
|
238,555
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.release
|
def release(self):
"""Create a release
1. Perform Sanity checks on work file.
2. Copy work file to releasefile location.
3. Perform cleanup actions on releasefile.
:returns: True if successfull, False if not.
:rtype: bool
:raises: None
"""
log.info("Releasing: %s", self._workfile.get_fullpath())
ac = self.build_actions()
ac.execute(self)
s = ac.status().value
if not s == ActionStatus.SUCCESS:
ard = ActionReportDialog(ac)
ard.exec_()
pass
return s == ActionStatus.SUCCESS
|
python
|
def release(self):
"""Create a release
1. Perform Sanity checks on work file.
2. Copy work file to releasefile location.
3. Perform cleanup actions on releasefile.
:returns: True if successfull, False if not.
:rtype: bool
:raises: None
"""
log.info("Releasing: %s", self._workfile.get_fullpath())
ac = self.build_actions()
ac.execute(self)
s = ac.status().value
if not s == ActionStatus.SUCCESS:
ard = ActionReportDialog(ac)
ard.exec_()
pass
return s == ActionStatus.SUCCESS
|
[
"def",
"release",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"Releasing: %s\"",
",",
"self",
".",
"_workfile",
".",
"get_fullpath",
"(",
")",
")",
"ac",
"=",
"self",
".",
"build_actions",
"(",
")",
"ac",
".",
"execute",
"(",
"self",
")",
"s",
"=",
"ac",
".",
"status",
"(",
")",
".",
"value",
"if",
"not",
"s",
"==",
"ActionStatus",
".",
"SUCCESS",
":",
"ard",
"=",
"ActionReportDialog",
"(",
"ac",
")",
"ard",
".",
"exec_",
"(",
")",
"pass",
"return",
"s",
"==",
"ActionStatus",
".",
"SUCCESS"
] |
Create a release
1. Perform Sanity checks on work file.
2. Copy work file to releasefile location.
3. Perform cleanup actions on releasefile.
:returns: True if successfull, False if not.
:rtype: bool
:raises: None
|
[
"Create",
"a",
"release"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L54-L73
|
238,556
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.build_actions
|
def build_actions(self):
"""Create an ActionCollection that will perform sanity checks, copy the file,
create a database entry and perform cleanup actions and in case of a failure clean everything up.
:param work: the workfile
:type work: :class:`JB_File`
:param release: the releasefile
:type release: :class:`JB_File`
:param checks: the action collection object with sanity checks
It should accept a :class:`JB_File` as object for execute.
:type checks: :class:`ActionCollection`
:param cleanup: a action collection object that holds cleanup actions for the given file.
It should accept a :class:`JB_File` as object for execute.
:type cleanup: :class:`ActionCollection`
:param comment: comment for the release
:type comment: :class:`str`
:returns: An ActionCollection ready to execute.
:rtype: :class:`ActionCollection`
:raises: None
"""
checkau = ActionUnit("Sanity Checks",
"Check the workfile. If the file is not conform, ask the user to continue.",
self.sanity_check)
copyau = ActionUnit("Copy File",
"Copy the workfile to the releasefile location.",
self.copy,
depsuccess=[checkau])
dbau = ActionUnit("Create DB entry",
"Create an entry in the database for the releasefile",
self.create_db_entry,
depsuccess=[copyau])
cleanau = ActionUnit("Cleanup",
"Cleanup the releasefile. If something fails, ask the user to continue.",
self.cleanup,
depsuccess=[dbau])
deletefau1 = ActionUnit("Delete the releasefile.",
"In case the db entry creation fails, delete the releasefile.",
self.delete_releasefile,
depfail=[dbau])
deletefau2 = ActionUnit("Delete the releasefile.",
"In case the cleanup fails, delete the releasefile.",
self.delete_releasefile,
depsuccess=[copyau],
depfail=[cleanau])
deletedbau = ActionUnit("Delete the database entry.",
"In case the cleanup fails, delete the database entry",
self.delete_db_entry,
depsuccess=[dbau],
depfail=[cleanau])
return ActionCollection([checkau, copyau, dbau, cleanau, deletefau1, deletefau2, deletedbau])
|
python
|
def build_actions(self):
"""Create an ActionCollection that will perform sanity checks, copy the file,
create a database entry and perform cleanup actions and in case of a failure clean everything up.
:param work: the workfile
:type work: :class:`JB_File`
:param release: the releasefile
:type release: :class:`JB_File`
:param checks: the action collection object with sanity checks
It should accept a :class:`JB_File` as object for execute.
:type checks: :class:`ActionCollection`
:param cleanup: a action collection object that holds cleanup actions for the given file.
It should accept a :class:`JB_File` as object for execute.
:type cleanup: :class:`ActionCollection`
:param comment: comment for the release
:type comment: :class:`str`
:returns: An ActionCollection ready to execute.
:rtype: :class:`ActionCollection`
:raises: None
"""
checkau = ActionUnit("Sanity Checks",
"Check the workfile. If the file is not conform, ask the user to continue.",
self.sanity_check)
copyau = ActionUnit("Copy File",
"Copy the workfile to the releasefile location.",
self.copy,
depsuccess=[checkau])
dbau = ActionUnit("Create DB entry",
"Create an entry in the database for the releasefile",
self.create_db_entry,
depsuccess=[copyau])
cleanau = ActionUnit("Cleanup",
"Cleanup the releasefile. If something fails, ask the user to continue.",
self.cleanup,
depsuccess=[dbau])
deletefau1 = ActionUnit("Delete the releasefile.",
"In case the db entry creation fails, delete the releasefile.",
self.delete_releasefile,
depfail=[dbau])
deletefau2 = ActionUnit("Delete the releasefile.",
"In case the cleanup fails, delete the releasefile.",
self.delete_releasefile,
depsuccess=[copyau],
depfail=[cleanau])
deletedbau = ActionUnit("Delete the database entry.",
"In case the cleanup fails, delete the database entry",
self.delete_db_entry,
depsuccess=[dbau],
depfail=[cleanau])
return ActionCollection([checkau, copyau, dbau, cleanau, deletefau1, deletefau2, deletedbau])
|
[
"def",
"build_actions",
"(",
"self",
")",
":",
"checkau",
"=",
"ActionUnit",
"(",
"\"Sanity Checks\"",
",",
"\"Check the workfile. If the file is not conform, ask the user to continue.\"",
",",
"self",
".",
"sanity_check",
")",
"copyau",
"=",
"ActionUnit",
"(",
"\"Copy File\"",
",",
"\"Copy the workfile to the releasefile location.\"",
",",
"self",
".",
"copy",
",",
"depsuccess",
"=",
"[",
"checkau",
"]",
")",
"dbau",
"=",
"ActionUnit",
"(",
"\"Create DB entry\"",
",",
"\"Create an entry in the database for the releasefile\"",
",",
"self",
".",
"create_db_entry",
",",
"depsuccess",
"=",
"[",
"copyau",
"]",
")",
"cleanau",
"=",
"ActionUnit",
"(",
"\"Cleanup\"",
",",
"\"Cleanup the releasefile. If something fails, ask the user to continue.\"",
",",
"self",
".",
"cleanup",
",",
"depsuccess",
"=",
"[",
"dbau",
"]",
")",
"deletefau1",
"=",
"ActionUnit",
"(",
"\"Delete the releasefile.\"",
",",
"\"In case the db entry creation fails, delete the releasefile.\"",
",",
"self",
".",
"delete_releasefile",
",",
"depfail",
"=",
"[",
"dbau",
"]",
")",
"deletefau2",
"=",
"ActionUnit",
"(",
"\"Delete the releasefile.\"",
",",
"\"In case the cleanup fails, delete the releasefile.\"",
",",
"self",
".",
"delete_releasefile",
",",
"depsuccess",
"=",
"[",
"copyau",
"]",
",",
"depfail",
"=",
"[",
"cleanau",
"]",
")",
"deletedbau",
"=",
"ActionUnit",
"(",
"\"Delete the database entry.\"",
",",
"\"In case the cleanup fails, delete the database entry\"",
",",
"self",
".",
"delete_db_entry",
",",
"depsuccess",
"=",
"[",
"dbau",
"]",
",",
"depfail",
"=",
"[",
"cleanau",
"]",
")",
"return",
"ActionCollection",
"(",
"[",
"checkau",
",",
"copyau",
",",
"dbau",
",",
"cleanau",
",",
"deletefau1",
",",
"deletefau2",
",",
"deletedbau",
"]",
")"
] |
Create an ActionCollection that will perform sanity checks, copy the file,
create a database entry and perform cleanup actions and in case of a failure clean everything up.
:param work: the workfile
:type work: :class:`JB_File`
:param release: the releasefile
:type release: :class:`JB_File`
:param checks: the action collection object with sanity checks
It should accept a :class:`JB_File` as object for execute.
:type checks: :class:`ActionCollection`
:param cleanup: a action collection object that holds cleanup actions for the given file.
It should accept a :class:`JB_File` as object for execute.
:type cleanup: :class:`ActionCollection`
:param comment: comment for the release
:type comment: :class:`str`
:returns: An ActionCollection ready to execute.
:rtype: :class:`ActionCollection`
:raises: None
|
[
"Create",
"an",
"ActionCollection",
"that",
"will",
"perform",
"sanity",
"checks",
"copy",
"the",
"file",
"create",
"a",
"database",
"entry",
"and",
"perform",
"cleanup",
"actions",
"and",
"in",
"case",
"of",
"a",
"failure",
"clean",
"everything",
"up",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L75-L124
|
238,557
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.sanity_check
|
def sanity_check(self, release):
"""Perform sanity checks on the workfile of the given release
This is inteded to be used in a action unit.
:param release: the release with the workfile and sanity checks
:type release: :class:`Release`
:returns: the action status of the sanity checks
:rtype: :class:`ActionStatus`
:raises: None
"""
log.info("Performing sanity checks.")
return execute_actioncollection(release._workfile, actioncollection=release._checks, confirm=True)
|
python
|
def sanity_check(self, release):
"""Perform sanity checks on the workfile of the given release
This is inteded to be used in a action unit.
:param release: the release with the workfile and sanity checks
:type release: :class:`Release`
:returns: the action status of the sanity checks
:rtype: :class:`ActionStatus`
:raises: None
"""
log.info("Performing sanity checks.")
return execute_actioncollection(release._workfile, actioncollection=release._checks, confirm=True)
|
[
"def",
"sanity_check",
"(",
"self",
",",
"release",
")",
":",
"log",
".",
"info",
"(",
"\"Performing sanity checks.\"",
")",
"return",
"execute_actioncollection",
"(",
"release",
".",
"_workfile",
",",
"actioncollection",
"=",
"release",
".",
"_checks",
",",
"confirm",
"=",
"True",
")"
] |
Perform sanity checks on the workfile of the given release
This is inteded to be used in a action unit.
:param release: the release with the workfile and sanity checks
:type release: :class:`Release`
:returns: the action status of the sanity checks
:rtype: :class:`ActionStatus`
:raises: None
|
[
"Perform",
"sanity",
"checks",
"on",
"the",
"workfile",
"of",
"the",
"given",
"release"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L126-L138
|
238,558
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.copy
|
def copy(self, release):
"""Copy the workfile of the given release to the releasefile location
This is inteded to be used in a action unit.
:param release: the release with the release and workfile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
"""
workfp = release._workfile.get_fullpath()
releasefp = release._releasefile.get_fullpath()
copy_file(release._workfile, release._releasefile)
return ActionStatus(ActionStatus.SUCCESS,
msg="Copied %s to %s location." % (workfp,
releasefp))
|
python
|
def copy(self, release):
"""Copy the workfile of the given release to the releasefile location
This is inteded to be used in a action unit.
:param release: the release with the release and workfile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
"""
workfp = release._workfile.get_fullpath()
releasefp = release._releasefile.get_fullpath()
copy_file(release._workfile, release._releasefile)
return ActionStatus(ActionStatus.SUCCESS,
msg="Copied %s to %s location." % (workfp,
releasefp))
|
[
"def",
"copy",
"(",
"self",
",",
"release",
")",
":",
"workfp",
"=",
"release",
".",
"_workfile",
".",
"get_fullpath",
"(",
")",
"releasefp",
"=",
"release",
".",
"_releasefile",
".",
"get_fullpath",
"(",
")",
"copy_file",
"(",
"release",
".",
"_workfile",
",",
"release",
".",
"_releasefile",
")",
"return",
"ActionStatus",
"(",
"ActionStatus",
".",
"SUCCESS",
",",
"msg",
"=",
"\"Copied %s to %s location.\"",
"%",
"(",
"workfp",
",",
"releasefp",
")",
")"
] |
Copy the workfile of the given release to the releasefile location
This is inteded to be used in a action unit.
:param release: the release with the release and workfile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
|
[
"Copy",
"the",
"workfile",
"of",
"the",
"given",
"release",
"to",
"the",
"releasefile",
"location"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L140-L156
|
238,559
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.create_db_entry
|
def create_db_entry(self, release):
"""Create a db entry for releasefile of the given release
Set _releasedbentry and _commentdbentry of the given release file
This is inteded to be used in a action unit.
:param release: the release with the releasefile and comment
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: ValidationError, If the comment could not be created, the TaskFile is deleted and the Exception is propagated.
"""
log.info("Create database entry with comment: %s", release.comment)
tfi = release._releasefile.get_obj()
tf, note = tfi.create_db_entry(release.comment)
release._releasedbentry = tf
release._commentdbentry = note
return ActionStatus(ActionStatus.SUCCESS,
msg="Created database entry for the release filw with comment: %s" % release.comment)
|
python
|
def create_db_entry(self, release):
"""Create a db entry for releasefile of the given release
Set _releasedbentry and _commentdbentry of the given release file
This is inteded to be used in a action unit.
:param release: the release with the releasefile and comment
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: ValidationError, If the comment could not be created, the TaskFile is deleted and the Exception is propagated.
"""
log.info("Create database entry with comment: %s", release.comment)
tfi = release._releasefile.get_obj()
tf, note = tfi.create_db_entry(release.comment)
release._releasedbentry = tf
release._commentdbentry = note
return ActionStatus(ActionStatus.SUCCESS,
msg="Created database entry for the release filw with comment: %s" % release.comment)
|
[
"def",
"create_db_entry",
"(",
"self",
",",
"release",
")",
":",
"log",
".",
"info",
"(",
"\"Create database entry with comment: %s\"",
",",
"release",
".",
"comment",
")",
"tfi",
"=",
"release",
".",
"_releasefile",
".",
"get_obj",
"(",
")",
"tf",
",",
"note",
"=",
"tfi",
".",
"create_db_entry",
"(",
"release",
".",
"comment",
")",
"release",
".",
"_releasedbentry",
"=",
"tf",
"release",
".",
"_commentdbentry",
"=",
"note",
"return",
"ActionStatus",
"(",
"ActionStatus",
".",
"SUCCESS",
",",
"msg",
"=",
"\"Created database entry for the release filw with comment: %s\"",
"%",
"release",
".",
"comment",
")"
] |
Create a db entry for releasefile of the given release
Set _releasedbentry and _commentdbentry of the given release file
This is inteded to be used in a action unit.
:param release: the release with the releasefile and comment
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: ValidationError, If the comment could not be created, the TaskFile is deleted and the Exception is propagated.
|
[
"Create",
"a",
"db",
"entry",
"for",
"releasefile",
"of",
"the",
"given",
"release"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L158-L177
|
238,560
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.cleanup
|
def cleanup(self, release):
"""Perform cleanup actions on the releasefile of the given release
This is inteded to be used in a action unit.
:param release: the release with the releasefile and cleanup actions
:type release: :class:`Release`
:returns: the action status of the cleanup actions
:rtype: :class:`ActionStatus`
:raises: None
"""
log.info("Performing cleanup.")
return execute_actioncollection(release._releasefile, actioncollection=release._cleanup, confirm=True)
|
python
|
def cleanup(self, release):
"""Perform cleanup actions on the releasefile of the given release
This is inteded to be used in a action unit.
:param release: the release with the releasefile and cleanup actions
:type release: :class:`Release`
:returns: the action status of the cleanup actions
:rtype: :class:`ActionStatus`
:raises: None
"""
log.info("Performing cleanup.")
return execute_actioncollection(release._releasefile, actioncollection=release._cleanup, confirm=True)
|
[
"def",
"cleanup",
"(",
"self",
",",
"release",
")",
":",
"log",
".",
"info",
"(",
"\"Performing cleanup.\"",
")",
"return",
"execute_actioncollection",
"(",
"release",
".",
"_releasefile",
",",
"actioncollection",
"=",
"release",
".",
"_cleanup",
",",
"confirm",
"=",
"True",
")"
] |
Perform cleanup actions on the releasefile of the given release
This is inteded to be used in a action unit.
:param release: the release with the releasefile and cleanup actions
:type release: :class:`Release`
:returns: the action status of the cleanup actions
:rtype: :class:`ActionStatus`
:raises: None
|
[
"Perform",
"cleanup",
"actions",
"on",
"the",
"releasefile",
"of",
"the",
"given",
"release"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L179-L191
|
238,561
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.delete_releasefile
|
def delete_releasefile(self, release):
"""Delete the releasefile of the given release
This is inteded to be used in a action unit.
:param release: the release with the releasefile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
"""
fp = release._releasefile.get_fullpath()
log.info("Deleting release file %s", fp)
delete_file(release._releasefile)
return ActionStatus(ActionStatus.SUCCESS,
msg="Deleted %s" % fp)
|
python
|
def delete_releasefile(self, release):
"""Delete the releasefile of the given release
This is inteded to be used in a action unit.
:param release: the release with the releasefile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
"""
fp = release._releasefile.get_fullpath()
log.info("Deleting release file %s", fp)
delete_file(release._releasefile)
return ActionStatus(ActionStatus.SUCCESS,
msg="Deleted %s" % fp)
|
[
"def",
"delete_releasefile",
"(",
"self",
",",
"release",
")",
":",
"fp",
"=",
"release",
".",
"_releasefile",
".",
"get_fullpath",
"(",
")",
"log",
".",
"info",
"(",
"\"Deleting release file %s\"",
",",
"fp",
")",
"delete_file",
"(",
"release",
".",
"_releasefile",
")",
"return",
"ActionStatus",
"(",
"ActionStatus",
".",
"SUCCESS",
",",
"msg",
"=",
"\"Deleted %s\"",
"%",
"fp",
")"
] |
Delete the releasefile of the given release
This is inteded to be used in a action unit.
:param release: the release with the releasefile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
|
[
"Delete",
"the",
"releasefile",
"of",
"the",
"given",
"release"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L193-L208
|
238,562
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/release.py
|
Release.delete_db_entry
|
def delete_db_entry(self, release):
"""Delete the db entries for releasefile and comment of the given release
:param release: the release with the releasefile and comment db entries
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
"""
log.info("Delete database entry for file.")
release._releasedbentry.delete()
log.info("Delete database entry for comment.")
release._commentdbentry.delete()
return ActionStatus(ActionStatus.SUCCESS,
msg="Deleted database entries for releasefile and comment")
|
python
|
def delete_db_entry(self, release):
"""Delete the db entries for releasefile and comment of the given release
:param release: the release with the releasefile and comment db entries
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
"""
log.info("Delete database entry for file.")
release._releasedbentry.delete()
log.info("Delete database entry for comment.")
release._commentdbentry.delete()
return ActionStatus(ActionStatus.SUCCESS,
msg="Deleted database entries for releasefile and comment")
|
[
"def",
"delete_db_entry",
"(",
"self",
",",
"release",
")",
":",
"log",
".",
"info",
"(",
"\"Delete database entry for file.\"",
")",
"release",
".",
"_releasedbentry",
".",
"delete",
"(",
")",
"log",
".",
"info",
"(",
"\"Delete database entry for comment.\"",
")",
"release",
".",
"_commentdbentry",
".",
"delete",
"(",
")",
"return",
"ActionStatus",
"(",
"ActionStatus",
".",
"SUCCESS",
",",
"msg",
"=",
"\"Deleted database entries for releasefile and comment\"",
")"
] |
Delete the db entries for releasefile and comment of the given release
:param release: the release with the releasefile and comment db entries
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
|
[
"Delete",
"the",
"db",
"entries",
"for",
"releasefile",
"and",
"comment",
"of",
"the",
"given",
"release"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/release.py#L210-L224
|
238,563
|
williamfzc/ConnectionTracer
|
ConnectionTracer/utils.py
|
socket_reader
|
def socket_reader(connection: socket, buffer_size: int = 1024):
""" read data from adb socket """
while connection is not None:
try:
buffer = connection.recv(buffer_size)
# no output
if not len(buffer):
raise ConnectionAbortedError
except ConnectionAbortedError:
# socket closed
print('connection aborted')
connection.close()
yield None
except OSError:
# still operate connection after it was closed
print('socket closed')
connection.close()
yield None
else:
yield buffer
|
python
|
def socket_reader(connection: socket, buffer_size: int = 1024):
""" read data from adb socket """
while connection is not None:
try:
buffer = connection.recv(buffer_size)
# no output
if not len(buffer):
raise ConnectionAbortedError
except ConnectionAbortedError:
# socket closed
print('connection aborted')
connection.close()
yield None
except OSError:
# still operate connection after it was closed
print('socket closed')
connection.close()
yield None
else:
yield buffer
|
[
"def",
"socket_reader",
"(",
"connection",
":",
"socket",
",",
"buffer_size",
":",
"int",
"=",
"1024",
")",
":",
"while",
"connection",
"is",
"not",
"None",
":",
"try",
":",
"buffer",
"=",
"connection",
".",
"recv",
"(",
"buffer_size",
")",
"# no output",
"if",
"not",
"len",
"(",
"buffer",
")",
":",
"raise",
"ConnectionAbortedError",
"except",
"ConnectionAbortedError",
":",
"# socket closed",
"print",
"(",
"'connection aborted'",
")",
"connection",
".",
"close",
"(",
")",
"yield",
"None",
"except",
"OSError",
":",
"# still operate connection after it was closed",
"print",
"(",
"'socket closed'",
")",
"connection",
".",
"close",
"(",
")",
"yield",
"None",
"else",
":",
"yield",
"buffer"
] |
read data from adb socket
|
[
"read",
"data",
"from",
"adb",
"socket"
] |
190003e374d6903cb82d2d21a1378979dc419ed3
|
https://github.com/williamfzc/ConnectionTracer/blob/190003e374d6903cb82d2d21a1378979dc419ed3/ConnectionTracer/utils.py#L13-L32
|
238,564
|
williamfzc/ConnectionTracer
|
ConnectionTracer/utils.py
|
decode_response
|
def decode_response(content: bytes) -> set:
""" adb response text -> device set """
content = content[4:].decode(config.ENCODING)
if '\t' not in content and '\n' not in content:
return set()
connected_devices = set()
device_list = [i for i in content.split('\n') if i]
for each_device in device_list:
device_id, device_status = each_device.split('\t')
if device_status == 'device':
connected_devices.add(device_id)
return connected_devices
|
python
|
def decode_response(content: bytes) -> set:
""" adb response text -> device set """
content = content[4:].decode(config.ENCODING)
if '\t' not in content and '\n' not in content:
return set()
connected_devices = set()
device_list = [i for i in content.split('\n') if i]
for each_device in device_list:
device_id, device_status = each_device.split('\t')
if device_status == 'device':
connected_devices.add(device_id)
return connected_devices
|
[
"def",
"decode_response",
"(",
"content",
":",
"bytes",
")",
"->",
"set",
":",
"content",
"=",
"content",
"[",
"4",
":",
"]",
".",
"decode",
"(",
"config",
".",
"ENCODING",
")",
"if",
"'\\t'",
"not",
"in",
"content",
"and",
"'\\n'",
"not",
"in",
"content",
":",
"return",
"set",
"(",
")",
"connected_devices",
"=",
"set",
"(",
")",
"device_list",
"=",
"[",
"i",
"for",
"i",
"in",
"content",
".",
"split",
"(",
"'\\n'",
")",
"if",
"i",
"]",
"for",
"each_device",
"in",
"device_list",
":",
"device_id",
",",
"device_status",
"=",
"each_device",
".",
"split",
"(",
"'\\t'",
")",
"if",
"device_status",
"==",
"'device'",
":",
"connected_devices",
".",
"add",
"(",
"device_id",
")",
"return",
"connected_devices"
] |
adb response text -> device set
|
[
"adb",
"response",
"text",
"-",
">",
"device",
"set"
] |
190003e374d6903cb82d2d21a1378979dc419ed3
|
https://github.com/williamfzc/ConnectionTracer/blob/190003e374d6903cb82d2d21a1378979dc419ed3/ConnectionTracer/utils.py#L36-L48
|
238,565
|
demosdemon/format-pipfile
|
format_pipfile/cli.py
|
main
|
def main(requirements_file, skip_requirements_file, pipfile, skip_pipfile):
# type: (str, bool, str, bool) -> None
"""Update the requirements.txt file and reformat the Pipfile."""
pipfile_path = path.Path(pipfile)
pf = load_pipfile(pipfile_path)
if not skip_requirements_file:
requirements_file_path = path.Path(requirements_file)
update_requirements(requirements_file_path, pf)
if not skip_pipfile:
dump_pipfile(pipfile_path, pf)
|
python
|
def main(requirements_file, skip_requirements_file, pipfile, skip_pipfile):
# type: (str, bool, str, bool) -> None
"""Update the requirements.txt file and reformat the Pipfile."""
pipfile_path = path.Path(pipfile)
pf = load_pipfile(pipfile_path)
if not skip_requirements_file:
requirements_file_path = path.Path(requirements_file)
update_requirements(requirements_file_path, pf)
if not skip_pipfile:
dump_pipfile(pipfile_path, pf)
|
[
"def",
"main",
"(",
"requirements_file",
",",
"skip_requirements_file",
",",
"pipfile",
",",
"skip_pipfile",
")",
":",
"# type: (str, bool, str, bool) -> None",
"pipfile_path",
"=",
"path",
".",
"Path",
"(",
"pipfile",
")",
"pf",
"=",
"load_pipfile",
"(",
"pipfile_path",
")",
"if",
"not",
"skip_requirements_file",
":",
"requirements_file_path",
"=",
"path",
".",
"Path",
"(",
"requirements_file",
")",
"update_requirements",
"(",
"requirements_file_path",
",",
"pf",
")",
"if",
"not",
"skip_pipfile",
":",
"dump_pipfile",
"(",
"pipfile_path",
",",
"pf",
")"
] |
Update the requirements.txt file and reformat the Pipfile.
|
[
"Update",
"the",
"requirements",
".",
"txt",
"file",
"and",
"reformat",
"the",
"Pipfile",
"."
] |
f95162c49d8fc13153080ddb11ac5a5dcd4d2e7c
|
https://github.com/demosdemon/format-pipfile/blob/f95162c49d8fc13153080ddb11ac5a5dcd4d2e7c/format_pipfile/cli.py#L238-L249
|
238,566
|
rbarrois/django-batchform
|
batchform/forms.py
|
BaseUploadForm.clean_file
|
def clean_file(self):
"""Analyse the uploaded file, and return the parsed lines.
Returns:
tuple of tuples of cells content (as text).
"""
data = self.cleaned_data['file']
available_parsers = self.get_parsers()
for parser in available_parsers:
try:
return parser.parse_file(data)
except parsers.ParserError:
pass
raise forms.ValidationError(
"No parser could read the file. Tried with parsers %s." %
(", " % (force_text(p) for p in available_parsers)))
|
python
|
def clean_file(self):
"""Analyse the uploaded file, and return the parsed lines.
Returns:
tuple of tuples of cells content (as text).
"""
data = self.cleaned_data['file']
available_parsers = self.get_parsers()
for parser in available_parsers:
try:
return parser.parse_file(data)
except parsers.ParserError:
pass
raise forms.ValidationError(
"No parser could read the file. Tried with parsers %s." %
(", " % (force_text(p) for p in available_parsers)))
|
[
"def",
"clean_file",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"cleaned_data",
"[",
"'file'",
"]",
"available_parsers",
"=",
"self",
".",
"get_parsers",
"(",
")",
"for",
"parser",
"in",
"available_parsers",
":",
"try",
":",
"return",
"parser",
".",
"parse_file",
"(",
"data",
")",
"except",
"parsers",
".",
"ParserError",
":",
"pass",
"raise",
"forms",
".",
"ValidationError",
"(",
"\"No parser could read the file. Tried with parsers %s.\"",
"%",
"(",
"\", \"",
"%",
"(",
"force_text",
"(",
"p",
")",
"for",
"p",
"in",
"available_parsers",
")",
")",
")"
] |
Analyse the uploaded file, and return the parsed lines.
Returns:
tuple of tuples of cells content (as text).
|
[
"Analyse",
"the",
"uploaded",
"file",
"and",
"return",
"the",
"parsed",
"lines",
"."
] |
f6b659a6790750285af248ccd1d4d178ecbad129
|
https://github.com/rbarrois/django-batchform/blob/f6b659a6790750285af248ccd1d4d178ecbad129/batchform/forms.py#L25-L43
|
238,567
|
rbarrois/django-batchform
|
batchform/forms.py
|
LineFormSet.clean
|
def clean(self):
"""Global cleanup."""
super(LineFormSet, self).clean()
if any(self.errors):
# Already seen errors, let's skip.
return
self.clean_unique_fields()
|
python
|
def clean(self):
"""Global cleanup."""
super(LineFormSet, self).clean()
if any(self.errors):
# Already seen errors, let's skip.
return
self.clean_unique_fields()
|
[
"def",
"clean",
"(",
"self",
")",
":",
"super",
"(",
"LineFormSet",
",",
"self",
")",
".",
"clean",
"(",
")",
"if",
"any",
"(",
"self",
".",
"errors",
")",
":",
"# Already seen errors, let's skip.",
"return",
"self",
".",
"clean_unique_fields",
"(",
")"
] |
Global cleanup.
|
[
"Global",
"cleanup",
"."
] |
f6b659a6790750285af248ccd1d4d178ecbad129
|
https://github.com/rbarrois/django-batchform/blob/f6b659a6790750285af248ccd1d4d178ecbad129/batchform/forms.py#L59-L67
|
238,568
|
rbarrois/django-batchform
|
batchform/forms.py
|
LineFormSet.clean_unique_fields
|
def clean_unique_fields(self):
"""Ensure 'unique fields' are unique among entered data."""
if not self.unique_fields:
return
keys = set()
duplicates = []
for form in self.forms:
key = tuple(form.cleaned_data[field] for field in self.unique_fields)
if key in keys:
duplicates.append(",".join(key))
else:
keys.add(key)
if duplicates:
raise forms.ValidationError(
"Fields %s should be unique; found duplicates for %s" % (
','.join(self.unique_fields), duplicates))
|
python
|
def clean_unique_fields(self):
"""Ensure 'unique fields' are unique among entered data."""
if not self.unique_fields:
return
keys = set()
duplicates = []
for form in self.forms:
key = tuple(form.cleaned_data[field] for field in self.unique_fields)
if key in keys:
duplicates.append(",".join(key))
else:
keys.add(key)
if duplicates:
raise forms.ValidationError(
"Fields %s should be unique; found duplicates for %s" % (
','.join(self.unique_fields), duplicates))
|
[
"def",
"clean_unique_fields",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"unique_fields",
":",
"return",
"keys",
"=",
"set",
"(",
")",
"duplicates",
"=",
"[",
"]",
"for",
"form",
"in",
"self",
".",
"forms",
":",
"key",
"=",
"tuple",
"(",
"form",
".",
"cleaned_data",
"[",
"field",
"]",
"for",
"field",
"in",
"self",
".",
"unique_fields",
")",
"if",
"key",
"in",
"keys",
":",
"duplicates",
".",
"append",
"(",
"\",\"",
".",
"join",
"(",
"key",
")",
")",
"else",
":",
"keys",
".",
"add",
"(",
"key",
")",
"if",
"duplicates",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"\"Fields %s should be unique; found duplicates for %s\"",
"%",
"(",
"','",
".",
"join",
"(",
"self",
".",
"unique_fields",
")",
",",
"duplicates",
")",
")"
] |
Ensure 'unique fields' are unique among entered data.
|
[
"Ensure",
"unique",
"fields",
"are",
"unique",
"among",
"entered",
"data",
"."
] |
f6b659a6790750285af248ccd1d4d178ecbad129
|
https://github.com/rbarrois/django-batchform/blob/f6b659a6790750285af248ccd1d4d178ecbad129/batchform/forms.py#L69-L87
|
238,569
|
rvswift/EB
|
EB/builder/exhaustive/exhaustive.py
|
run
|
def run(itf):
"""
Run optimize functions.
"""
if not itf:
return 1
# access user input
options = SplitInput(itf)
# read input
inputpath = os.path.abspath(options.inputpath)
print(" Reading input file ...")
molecules = csv_interface.read_csv(inputpath, options)
if not molecules:
print("\n '{flag}' was unable to be parsed\n".format(flag=os.path.basename(options.inputpath)))
sys.exit(1)
# determine the sort order & ensemble_size
#sort_order = classification.get_sort_order(molecules)
sort_order = 'asc'
ensemble_size = options.ensemble_size
# loop over all ensembles
# temp 2/3/15 append to auc_list ef_list & write it out for later histogram construction
auc_list = []
ef_list = []
for size in [x + 1 for x in range(ensemble_size)]:
auc, ef = optimizor(molecules, sort_order, size, options)
auc_list += auc
ef_list += ef
# temp 2/9/15 write auc_list & ef_list out to files for subsequent post-processing
f = open('auc_histogram.csv', 'w')
for value in auc_list:
f.write('%f\n' % value)
#f.write('%f, %s\n' % (value[0], value[1]))
f.close()
f = open('ef_histogram.csv', 'w')
for value in ef_list:
f.write('%f\n' % value)
f.close()
|
python
|
def run(itf):
"""
Run optimize functions.
"""
if not itf:
return 1
# access user input
options = SplitInput(itf)
# read input
inputpath = os.path.abspath(options.inputpath)
print(" Reading input file ...")
molecules = csv_interface.read_csv(inputpath, options)
if not molecules:
print("\n '{flag}' was unable to be parsed\n".format(flag=os.path.basename(options.inputpath)))
sys.exit(1)
# determine the sort order & ensemble_size
#sort_order = classification.get_sort_order(molecules)
sort_order = 'asc'
ensemble_size = options.ensemble_size
# loop over all ensembles
# temp 2/3/15 append to auc_list ef_list & write it out for later histogram construction
auc_list = []
ef_list = []
for size in [x + 1 for x in range(ensemble_size)]:
auc, ef = optimizor(molecules, sort_order, size, options)
auc_list += auc
ef_list += ef
# temp 2/9/15 write auc_list & ef_list out to files for subsequent post-processing
f = open('auc_histogram.csv', 'w')
for value in auc_list:
f.write('%f\n' % value)
#f.write('%f, %s\n' % (value[0], value[1]))
f.close()
f = open('ef_histogram.csv', 'w')
for value in ef_list:
f.write('%f\n' % value)
f.close()
|
[
"def",
"run",
"(",
"itf",
")",
":",
"if",
"not",
"itf",
":",
"return",
"1",
"# access user input",
"options",
"=",
"SplitInput",
"(",
"itf",
")",
"# read input",
"inputpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"options",
".",
"inputpath",
")",
"print",
"(",
"\" Reading input file ...\"",
")",
"molecules",
"=",
"csv_interface",
".",
"read_csv",
"(",
"inputpath",
",",
"options",
")",
"if",
"not",
"molecules",
":",
"print",
"(",
"\"\\n '{flag}' was unable to be parsed\\n\"",
".",
"format",
"(",
"flag",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"options",
".",
"inputpath",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# determine the sort order & ensemble_size",
"#sort_order = classification.get_sort_order(molecules)",
"sort_order",
"=",
"'asc'",
"ensemble_size",
"=",
"options",
".",
"ensemble_size",
"# loop over all ensembles",
"# temp 2/3/15 append to auc_list ef_list & write it out for later histogram construction",
"auc_list",
"=",
"[",
"]",
"ef_list",
"=",
"[",
"]",
"for",
"size",
"in",
"[",
"x",
"+",
"1",
"for",
"x",
"in",
"range",
"(",
"ensemble_size",
")",
"]",
":",
"auc",
",",
"ef",
"=",
"optimizor",
"(",
"molecules",
",",
"sort_order",
",",
"size",
",",
"options",
")",
"auc_list",
"+=",
"auc",
"ef_list",
"+=",
"ef",
"# temp 2/9/15 write auc_list & ef_list out to files for subsequent post-processing",
"f",
"=",
"open",
"(",
"'auc_histogram.csv'",
",",
"'w'",
")",
"for",
"value",
"in",
"auc_list",
":",
"f",
".",
"write",
"(",
"'%f\\n'",
"%",
"value",
")",
"#f.write('%f, %s\\n' % (value[0], value[1]))",
"f",
".",
"close",
"(",
")",
"f",
"=",
"open",
"(",
"'ef_histogram.csv'",
",",
"'w'",
")",
"for",
"value",
"in",
"ef_list",
":",
"f",
".",
"write",
"(",
"'%f\\n'",
"%",
"value",
")",
"f",
".",
"close",
"(",
")"
] |
Run optimize functions.
|
[
"Run",
"optimize",
"functions",
"."
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/exhaustive/exhaustive.py#L19-L60
|
238,570
|
rvswift/EB
|
EB/builder/exhaustive/exhaustive.py
|
optimizor
|
def optimizor(molecules, sort_order, ensemble_size, options):
"""
Evaluate the performance of all ensembles of fixed size.
"""
# set variables
ncpu = options.ncpu
score_field = options.score_field
# generate an exhaustive list of all possible ensembles
ensemble_list = make_ensemble_list(molecules, score_field, ensemble_size)
# set number of processors.
if not ncpu:
ncpu = multiprocessing.cpu_count()
if ncpu > 1:
print("Determining the performance of {d} ensembles using {n} processors".format(d=len(ensemble_list), n=ncpu))
if ncpu > len(ensemble_list):
ncpu = len(ensemble_list)
jobs = []
output_queue = multiprocessing.Queue()
for ensemble_chunk in chunker(ensemble_list, ncpu):
p = multiprocessing.Process(target=evaluate,
args=(molecules, ensemble_chunk, sort_order, options, output_queue))
jobs.append(p)
p.start()
# collect results into a dictionary
results = {}
for i in range(len(jobs)):
results.update(output_queue.get())
# stop jobs
for j in jobs:
j.join()
else:
print("Determining the performance of {d} ensembles using {n} processor".format(d=len(ensemble_list), n=ncpu))
results = evaluate(molecules, ensemble_list, sort_order, options)
# peel away the best performing ensemble
ensemble = screener.find_best_ensemble(results, options)
# write out the best performing ensemble
output.write_ensemble(list(ensemble), options)
# temp 2/9/15 generate and return a list of auc values and ef at fpf = 0.001 to build up a histogram
nd = max([results[x].ef.keys() for x in results.keys()][0])
n = int(round(0.001 * nd))
ef_list = [results[x].get_prop(n, 'ef') for x in results.keys()]
auc_list = [results[x].get_prop('auc') for x in results.keys()]
# auc_list = [[results[x].get_prop('auc'), results[x].get_prop('ensemble')] for x in results.keys()]
return auc_list, ef_list
|
python
|
def optimizor(molecules, sort_order, ensemble_size, options):
"""
Evaluate the performance of all ensembles of fixed size.
"""
# set variables
ncpu = options.ncpu
score_field = options.score_field
# generate an exhaustive list of all possible ensembles
ensemble_list = make_ensemble_list(molecules, score_field, ensemble_size)
# set number of processors.
if not ncpu:
ncpu = multiprocessing.cpu_count()
if ncpu > 1:
print("Determining the performance of {d} ensembles using {n} processors".format(d=len(ensemble_list), n=ncpu))
if ncpu > len(ensemble_list):
ncpu = len(ensemble_list)
jobs = []
output_queue = multiprocessing.Queue()
for ensemble_chunk in chunker(ensemble_list, ncpu):
p = multiprocessing.Process(target=evaluate,
args=(molecules, ensemble_chunk, sort_order, options, output_queue))
jobs.append(p)
p.start()
# collect results into a dictionary
results = {}
for i in range(len(jobs)):
results.update(output_queue.get())
# stop jobs
for j in jobs:
j.join()
else:
print("Determining the performance of {d} ensembles using {n} processor".format(d=len(ensemble_list), n=ncpu))
results = evaluate(molecules, ensemble_list, sort_order, options)
# peel away the best performing ensemble
ensemble = screener.find_best_ensemble(results, options)
# write out the best performing ensemble
output.write_ensemble(list(ensemble), options)
# temp 2/9/15 generate and return a list of auc values and ef at fpf = 0.001 to build up a histogram
nd = max([results[x].ef.keys() for x in results.keys()][0])
n = int(round(0.001 * nd))
ef_list = [results[x].get_prop(n, 'ef') for x in results.keys()]
auc_list = [results[x].get_prop('auc') for x in results.keys()]
# auc_list = [[results[x].get_prop('auc'), results[x].get_prop('ensemble')] for x in results.keys()]
return auc_list, ef_list
|
[
"def",
"optimizor",
"(",
"molecules",
",",
"sort_order",
",",
"ensemble_size",
",",
"options",
")",
":",
"# set variables",
"ncpu",
"=",
"options",
".",
"ncpu",
"score_field",
"=",
"options",
".",
"score_field",
"# generate an exhaustive list of all possible ensembles",
"ensemble_list",
"=",
"make_ensemble_list",
"(",
"molecules",
",",
"score_field",
",",
"ensemble_size",
")",
"# set number of processors.",
"if",
"not",
"ncpu",
":",
"ncpu",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"if",
"ncpu",
">",
"1",
":",
"print",
"(",
"\"Determining the performance of {d} ensembles using {n} processors\"",
".",
"format",
"(",
"d",
"=",
"len",
"(",
"ensemble_list",
")",
",",
"n",
"=",
"ncpu",
")",
")",
"if",
"ncpu",
">",
"len",
"(",
"ensemble_list",
")",
":",
"ncpu",
"=",
"len",
"(",
"ensemble_list",
")",
"jobs",
"=",
"[",
"]",
"output_queue",
"=",
"multiprocessing",
".",
"Queue",
"(",
")",
"for",
"ensemble_chunk",
"in",
"chunker",
"(",
"ensemble_list",
",",
"ncpu",
")",
":",
"p",
"=",
"multiprocessing",
".",
"Process",
"(",
"target",
"=",
"evaluate",
",",
"args",
"=",
"(",
"molecules",
",",
"ensemble_chunk",
",",
"sort_order",
",",
"options",
",",
"output_queue",
")",
")",
"jobs",
".",
"append",
"(",
"p",
")",
"p",
".",
"start",
"(",
")",
"# collect results into a dictionary",
"results",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"jobs",
")",
")",
":",
"results",
".",
"update",
"(",
"output_queue",
".",
"get",
"(",
")",
")",
"# stop jobs",
"for",
"j",
"in",
"jobs",
":",
"j",
".",
"join",
"(",
")",
"else",
":",
"print",
"(",
"\"Determining the performance of {d} ensembles using {n} processor\"",
".",
"format",
"(",
"d",
"=",
"len",
"(",
"ensemble_list",
")",
",",
"n",
"=",
"ncpu",
")",
")",
"results",
"=",
"evaluate",
"(",
"molecules",
",",
"ensemble_list",
",",
"sort_order",
",",
"options",
")",
"# peel away the best performing ensemble",
"ensemble",
"=",
"screener",
".",
"find_best_ensemble",
"(",
"results",
",",
"options",
")",
"# write out the best performing ensemble",
"output",
".",
"write_ensemble",
"(",
"list",
"(",
"ensemble",
")",
",",
"options",
")",
"# temp 2/9/15 generate and return a list of auc values and ef at fpf = 0.001 to build up a histogram",
"nd",
"=",
"max",
"(",
"[",
"results",
"[",
"x",
"]",
".",
"ef",
".",
"keys",
"(",
")",
"for",
"x",
"in",
"results",
".",
"keys",
"(",
")",
"]",
"[",
"0",
"]",
")",
"n",
"=",
"int",
"(",
"round",
"(",
"0.001",
"*",
"nd",
")",
")",
"ef_list",
"=",
"[",
"results",
"[",
"x",
"]",
".",
"get_prop",
"(",
"n",
",",
"'ef'",
")",
"for",
"x",
"in",
"results",
".",
"keys",
"(",
")",
"]",
"auc_list",
"=",
"[",
"results",
"[",
"x",
"]",
".",
"get_prop",
"(",
"'auc'",
")",
"for",
"x",
"in",
"results",
".",
"keys",
"(",
")",
"]",
"# auc_list = [[results[x].get_prop('auc'), results[x].get_prop('ensemble')] for x in results.keys()]",
"return",
"auc_list",
",",
"ef_list"
] |
Evaluate the performance of all ensembles of fixed size.
|
[
"Evaluate",
"the",
"performance",
"of",
"all",
"ensembles",
"of",
"fixed",
"size",
"."
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/exhaustive/exhaustive.py#L62-L117
|
238,571
|
rvswift/EB
|
EB/builder/exhaustive/exhaustive.py
|
evaluate
|
def evaluate(molecules, ensemble_chunk, sort_order, options, output_queue=None):
"""
Evaluate VS performance of each ensemble in ensemble_chunk
"""
results = {} # {('receptor_1', ..., 'receptor_n') : ensemble storage object}
for ensemble in ensemble_chunk:
results[ensemble] = calculate_performance(molecules, ensemble, sort_order, options)
if output_queue is not None:
output_queue.put(results)
else:
return results
|
python
|
def evaluate(molecules, ensemble_chunk, sort_order, options, output_queue=None):
"""
Evaluate VS performance of each ensemble in ensemble_chunk
"""
results = {} # {('receptor_1', ..., 'receptor_n') : ensemble storage object}
for ensemble in ensemble_chunk:
results[ensemble] = calculate_performance(molecules, ensemble, sort_order, options)
if output_queue is not None:
output_queue.put(results)
else:
return results
|
[
"def",
"evaluate",
"(",
"molecules",
",",
"ensemble_chunk",
",",
"sort_order",
",",
"options",
",",
"output_queue",
"=",
"None",
")",
":",
"results",
"=",
"{",
"}",
"# {('receptor_1', ..., 'receptor_n') : ensemble storage object}",
"for",
"ensemble",
"in",
"ensemble_chunk",
":",
"results",
"[",
"ensemble",
"]",
"=",
"calculate_performance",
"(",
"molecules",
",",
"ensemble",
",",
"sort_order",
",",
"options",
")",
"if",
"output_queue",
"is",
"not",
"None",
":",
"output_queue",
".",
"put",
"(",
"results",
")",
"else",
":",
"return",
"results"
] |
Evaluate VS performance of each ensemble in ensemble_chunk
|
[
"Evaluate",
"VS",
"performance",
"of",
"each",
"ensemble",
"in",
"ensemble_chunk"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/exhaustive/exhaustive.py#L119-L132
|
238,572
|
rvswift/EB
|
EB/builder/exhaustive/exhaustive.py
|
make_ensemble_list
|
def make_ensemble_list(molecules, score_field, ensemble_size):
"""
Construct ensemble list
"""
# generate list of queries
queryList = molecules[0].scores.keys()
# nchoosek
ensemble_iterator = itertools.combinations(queryList, ensemble_size)
# list of tuples: [(query1, query2), ... (queryN-1, queryN)
ensembleList = []
for ensemble in ensemble_iterator:
ensembleList.append(ensemble)
return ensembleList
|
python
|
def make_ensemble_list(molecules, score_field, ensemble_size):
"""
Construct ensemble list
"""
# generate list of queries
queryList = molecules[0].scores.keys()
# nchoosek
ensemble_iterator = itertools.combinations(queryList, ensemble_size)
# list of tuples: [(query1, query2), ... (queryN-1, queryN)
ensembleList = []
for ensemble in ensemble_iterator:
ensembleList.append(ensemble)
return ensembleList
|
[
"def",
"make_ensemble_list",
"(",
"molecules",
",",
"score_field",
",",
"ensemble_size",
")",
":",
"# generate list of queries",
"queryList",
"=",
"molecules",
"[",
"0",
"]",
".",
"scores",
".",
"keys",
"(",
")",
"# nchoosek",
"ensemble_iterator",
"=",
"itertools",
".",
"combinations",
"(",
"queryList",
",",
"ensemble_size",
")",
"# list of tuples: [(query1, query2), ... (queryN-1, queryN)",
"ensembleList",
"=",
"[",
"]",
"for",
"ensemble",
"in",
"ensemble_iterator",
":",
"ensembleList",
".",
"append",
"(",
"ensemble",
")",
"return",
"ensembleList"
] |
Construct ensemble list
|
[
"Construct",
"ensemble",
"list"
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/exhaustive/exhaustive.py#L166-L183
|
238,573
|
rvswift/EB
|
EB/builder/exhaustive/exhaustive.py
|
chunker
|
def chunker(ensemble_list, ncpu):
"""
Generate successive chunks of ensemble_list.
"""
# determine sublist lengths
length = int(len(ensemble_list) / ncpu)
# generator
for i in range(0, len(ensemble_list), length):
yield ensemble_list[i:i + length]
|
python
|
def chunker(ensemble_list, ncpu):
"""
Generate successive chunks of ensemble_list.
"""
# determine sublist lengths
length = int(len(ensemble_list) / ncpu)
# generator
for i in range(0, len(ensemble_list), length):
yield ensemble_list[i:i + length]
|
[
"def",
"chunker",
"(",
"ensemble_list",
",",
"ncpu",
")",
":",
"# determine sublist lengths",
"length",
"=",
"int",
"(",
"len",
"(",
"ensemble_list",
")",
"/",
"ncpu",
")",
"# generator",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"ensemble_list",
")",
",",
"length",
")",
":",
"yield",
"ensemble_list",
"[",
"i",
":",
"i",
"+",
"length",
"]"
] |
Generate successive chunks of ensemble_list.
|
[
"Generate",
"successive",
"chunks",
"of",
"ensemble_list",
"."
] |
341880b79faf8147dc9fa6e90438531cd09fabcc
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/exhaustive/exhaustive.py#L186-L196
|
238,574
|
Brazelton-Lab/bio_utils
|
bio_utils/verifiers/fasta.py
|
fasta_verifier
|
def fasta_verifier(entries, ambiguous=False):
"""Raises error if invalid FASTA format detected
Args:
entries (list): A list of FastaEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTA format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fasta_iter
>>> import os
>>> entries = r'>entry1{0}AAGGATTCG{0}' \
... r'>entry{0}AGGTCCCCCG{0}' \
... r'>entry3{0}GCCTAGC{0}'.format(os.linesep)
>>> fasta_entries = fasta_iter(iter(entries.split(os.linesep)))
>>> fasta_verifier(fasta_entries)
"""
if ambiguous:
regex = r'^>.+{0}[ACGTURYKMSWBDHVNX]+{0}$'.format(os.linesep)
else:
regex = r'^>.+{0}[ACGTU]+{0}$'.format(os.linesep)
delimiter = r'{0}'.format(os.linesep)
for entry in entries:
try:
entry_verifier([entry.write()], regex, delimiter)
except FormatError as error:
if error.part == 0:
msg = 'Unknown Header Error with {0}'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTURYKMSWBDHVNX]'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and not ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTU]'.format(entry.id)
raise FormatError(message=msg)
else:
msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)
raise FormatError(message=msg)
|
python
|
def fasta_verifier(entries, ambiguous=False):
"""Raises error if invalid FASTA format detected
Args:
entries (list): A list of FastaEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTA format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fasta_iter
>>> import os
>>> entries = r'>entry1{0}AAGGATTCG{0}' \
... r'>entry{0}AGGTCCCCCG{0}' \
... r'>entry3{0}GCCTAGC{0}'.format(os.linesep)
>>> fasta_entries = fasta_iter(iter(entries.split(os.linesep)))
>>> fasta_verifier(fasta_entries)
"""
if ambiguous:
regex = r'^>.+{0}[ACGTURYKMSWBDHVNX]+{0}$'.format(os.linesep)
else:
regex = r'^>.+{0}[ACGTU]+{0}$'.format(os.linesep)
delimiter = r'{0}'.format(os.linesep)
for entry in entries:
try:
entry_verifier([entry.write()], regex, delimiter)
except FormatError as error:
if error.part == 0:
msg = 'Unknown Header Error with {0}'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTURYKMSWBDHVNX]'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and not ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTU]'.format(entry.id)
raise FormatError(message=msg)
else:
msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)
raise FormatError(message=msg)
|
[
"def",
"fasta_verifier",
"(",
"entries",
",",
"ambiguous",
"=",
"False",
")",
":",
"if",
"ambiguous",
":",
"regex",
"=",
"r'^>.+{0}[ACGTURYKMSWBDHVNX]+{0}$'",
".",
"format",
"(",
"os",
".",
"linesep",
")",
"else",
":",
"regex",
"=",
"r'^>.+{0}[ACGTU]+{0}$'",
".",
"format",
"(",
"os",
".",
"linesep",
")",
"delimiter",
"=",
"r'{0}'",
".",
"format",
"(",
"os",
".",
"linesep",
")",
"for",
"entry",
"in",
"entries",
":",
"try",
":",
"entry_verifier",
"(",
"[",
"entry",
".",
"write",
"(",
")",
"]",
",",
"regex",
",",
"delimiter",
")",
"except",
"FormatError",
"as",
"error",
":",
"if",
"error",
".",
"part",
"==",
"0",
":",
"msg",
"=",
"'Unknown Header Error with {0}'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"elif",
"error",
".",
"part",
"==",
"1",
"and",
"ambiguous",
":",
"msg",
"=",
"'{0} contains a base not in '",
"'[ACGTURYKMSWBDHVNX]'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"elif",
"error",
".",
"part",
"==",
"1",
"and",
"not",
"ambiguous",
":",
"msg",
"=",
"'{0} contains a base not in '",
"'[ACGTU]'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"else",
":",
"msg",
"=",
"'{0}: Unknown Error: Likely a Bug'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")"
] |
Raises error if invalid FASTA format detected
Args:
entries (list): A list of FastaEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTA format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fasta_iter
>>> import os
>>> entries = r'>entry1{0}AAGGATTCG{0}' \
... r'>entry{0}AGGTCCCCCG{0}' \
... r'>entry3{0}GCCTAGC{0}'.format(os.linesep)
>>> fasta_entries = fasta_iter(iter(entries.split(os.linesep)))
>>> fasta_verifier(fasta_entries)
|
[
"Raises",
"error",
"if",
"invalid",
"FASTA",
"format",
"detected"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/verifiers/fasta.py#L46-L90
|
238,575
|
s-m-i-t-a/railroad
|
railroad/guard.py
|
guard
|
def guard(params, guardian, error_class=GuardError, message=''):
'''
A guard function - check parameters
with guardian function on decorated function
:param tuple or string params: guarded function parameter/s
:param function guardian: verifying the conditions for the selected parameter
:param Exception error_class: raised class when guardian return false
:param string message: error message
'''
params = [params] if isinstance(params, string_types) else params
def guard_decorate(f):
@wraps(f)
def _guard_decorate(*args, **kwargs):
if guardian(**_params(f, args, kwargs, params)):
return f(*args, **kwargs)
else:
raise error_class(message)
return _guard_decorate
return guard_decorate
|
python
|
def guard(params, guardian, error_class=GuardError, message=''):
'''
A guard function - check parameters
with guardian function on decorated function
:param tuple or string params: guarded function parameter/s
:param function guardian: verifying the conditions for the selected parameter
:param Exception error_class: raised class when guardian return false
:param string message: error message
'''
params = [params] if isinstance(params, string_types) else params
def guard_decorate(f):
@wraps(f)
def _guard_decorate(*args, **kwargs):
if guardian(**_params(f, args, kwargs, params)):
return f(*args, **kwargs)
else:
raise error_class(message)
return _guard_decorate
return guard_decorate
|
[
"def",
"guard",
"(",
"params",
",",
"guardian",
",",
"error_class",
"=",
"GuardError",
",",
"message",
"=",
"''",
")",
":",
"params",
"=",
"[",
"params",
"]",
"if",
"isinstance",
"(",
"params",
",",
"string_types",
")",
"else",
"params",
"def",
"guard_decorate",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"_guard_decorate",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"guardian",
"(",
"*",
"*",
"_params",
"(",
"f",
",",
"args",
",",
"kwargs",
",",
"params",
")",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"error_class",
"(",
"message",
")",
"return",
"_guard_decorate",
"return",
"guard_decorate"
] |
A guard function - check parameters
with guardian function on decorated function
:param tuple or string params: guarded function parameter/s
:param function guardian: verifying the conditions for the selected parameter
:param Exception error_class: raised class when guardian return false
:param string message: error message
|
[
"A",
"guard",
"function",
"-",
"check",
"parameters",
"with",
"guardian",
"function",
"on",
"decorated",
"function"
] |
ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c
|
https://github.com/s-m-i-t-a/railroad/blob/ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c/railroad/guard.py#L36-L56
|
238,576
|
avanwyk/cipy
|
examples/pso_optimizer.py
|
main
|
def main(dimension, iterations):
""" Main function for PSO optimizer example.
Instantiate PSOOptimizer to optimize 30-dimensional spherical function.
"""
optimizer = PSOOptimizer()
solution = optimizer.minimize(sphere, -5.12, 5.12, dimension,
max_iterations(iterations))
return solution, optimizer
|
python
|
def main(dimension, iterations):
""" Main function for PSO optimizer example.
Instantiate PSOOptimizer to optimize 30-dimensional spherical function.
"""
optimizer = PSOOptimizer()
solution = optimizer.minimize(sphere, -5.12, 5.12, dimension,
max_iterations(iterations))
return solution, optimizer
|
[
"def",
"main",
"(",
"dimension",
",",
"iterations",
")",
":",
"optimizer",
"=",
"PSOOptimizer",
"(",
")",
"solution",
"=",
"optimizer",
".",
"minimize",
"(",
"sphere",
",",
"-",
"5.12",
",",
"5.12",
",",
"dimension",
",",
"max_iterations",
"(",
"iterations",
")",
")",
"return",
"solution",
",",
"optimizer"
] |
Main function for PSO optimizer example.
Instantiate PSOOptimizer to optimize 30-dimensional spherical function.
|
[
"Main",
"function",
"for",
"PSO",
"optimizer",
"example",
"."
] |
98450dd01767b3615c113e50dc396f135e177b29
|
https://github.com/avanwyk/cipy/blob/98450dd01767b3615c113e50dc396f135e177b29/examples/pso_optimizer.py#L20-L28
|
238,577
|
mapmyfitness/jtime
|
jtime/git_ext.py
|
GIT.get_last_commit_message
|
def get_last_commit_message(self):
"""
Gets the last commit message on the active branch
Returns None if not in a git repo
"""
# Check if we are currently in a repo
try:
branch = self.active_branch
return self.commit(branch).message
except InvalidGitRepositoryError:
print "Not in a git repo"
return None
|
python
|
def get_last_commit_message(self):
"""
Gets the last commit message on the active branch
Returns None if not in a git repo
"""
# Check if we are currently in a repo
try:
branch = self.active_branch
return self.commit(branch).message
except InvalidGitRepositoryError:
print "Not in a git repo"
return None
|
[
"def",
"get_last_commit_message",
"(",
"self",
")",
":",
"# Check if we are currently in a repo",
"try",
":",
"branch",
"=",
"self",
".",
"active_branch",
"return",
"self",
".",
"commit",
"(",
"branch",
")",
".",
"message",
"except",
"InvalidGitRepositoryError",
":",
"print",
"\"Not in a git repo\"",
"return",
"None"
] |
Gets the last commit message on the active branch
Returns None if not in a git repo
|
[
"Gets",
"the",
"last",
"commit",
"message",
"on",
"the",
"active",
"branch"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/git_ext.py#L30-L42
|
238,578
|
mapmyfitness/jtime
|
jtime/git_ext.py
|
GIT.get_last_modified_timestamp
|
def get_last_modified_timestamp(self):
"""
Looks at the files in a git root directory and grabs the last modified timestamp
"""
cmd = "find . -print0 | xargs -0 stat -f '%T@ %p' | sort -n | tail -1 | cut -f2- -d' '"
ps = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
output = ps.communicate()[0]
print output
|
python
|
def get_last_modified_timestamp(self):
"""
Looks at the files in a git root directory and grabs the last modified timestamp
"""
cmd = "find . -print0 | xargs -0 stat -f '%T@ %p' | sort -n | tail -1 | cut -f2- -d' '"
ps = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
output = ps.communicate()[0]
print output
|
[
"def",
"get_last_modified_timestamp",
"(",
"self",
")",
":",
"cmd",
"=",
"\"find . -print0 | xargs -0 stat -f '%T@ %p' | sort -n | tail -1 | cut -f2- -d' '\"",
"ps",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"output",
"=",
"ps",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"print",
"output"
] |
Looks at the files in a git root directory and grabs the last modified timestamp
|
[
"Looks",
"at",
"the",
"files",
"in",
"a",
"git",
"root",
"directory",
"and",
"grabs",
"the",
"last",
"modified",
"timestamp"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/git_ext.py#L44-L51
|
238,579
|
rca/cmdline
|
src/cmdline/config.py
|
find_config_root
|
def find_config_root(path=sys.argv[0]):
"""
Finds config root relative to the given file path
"""
dirname = os.path.dirname(path)
lastdirname = None
while dirname != lastdirname:
config_root = os.path.join(dirname, 'config')
if os.path.exists(config_root):
return config_root
lastdirname, dirname = dirname, os.path.dirname(dirname)
|
python
|
def find_config_root(path=sys.argv[0]):
"""
Finds config root relative to the given file path
"""
dirname = os.path.dirname(path)
lastdirname = None
while dirname != lastdirname:
config_root = os.path.join(dirname, 'config')
if os.path.exists(config_root):
return config_root
lastdirname, dirname = dirname, os.path.dirname(dirname)
|
[
"def",
"find_config_root",
"(",
"path",
"=",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"lastdirname",
"=",
"None",
"while",
"dirname",
"!=",
"lastdirname",
":",
"config_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'config'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"config_root",
")",
":",
"return",
"config_root",
"lastdirname",
",",
"dirname",
"=",
"dirname",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"dirname",
")"
] |
Finds config root relative to the given file path
|
[
"Finds",
"config",
"root",
"relative",
"to",
"the",
"given",
"file",
"path"
] |
c01990aa1781c4d435c91c67962fb6ad92b7b579
|
https://github.com/rca/cmdline/blob/c01990aa1781c4d435c91c67962fb6ad92b7b579/src/cmdline/config.py#L8-L20
|
238,580
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
ConfigObjModel.restore_default
|
def restore_default(self, index):
"""Set the value of the given index row to its default
:param index:
:type index:
:returns:
:rtype:
:raises:
"""
spec = self.get_configspec_str(index)
if spec is None or isinstance(spec, Section):
return
try:
default = self._vld.get_default_value(spec)
defaultstr = self._val_to_str(default)
self.setData(index, defaultstr)
except KeyError:
raise ConfigError("Missing Default Value in spec: \"%s\"" % spec)
|
python
|
def restore_default(self, index):
"""Set the value of the given index row to its default
:param index:
:type index:
:returns:
:rtype:
:raises:
"""
spec = self.get_configspec_str(index)
if spec is None or isinstance(spec, Section):
return
try:
default = self._vld.get_default_value(spec)
defaultstr = self._val_to_str(default)
self.setData(index, defaultstr)
except KeyError:
raise ConfigError("Missing Default Value in spec: \"%s\"" % spec)
|
[
"def",
"restore_default",
"(",
"self",
",",
"index",
")",
":",
"spec",
"=",
"self",
".",
"get_configspec_str",
"(",
"index",
")",
"if",
"spec",
"is",
"None",
"or",
"isinstance",
"(",
"spec",
",",
"Section",
")",
":",
"return",
"try",
":",
"default",
"=",
"self",
".",
"_vld",
".",
"get_default_value",
"(",
"spec",
")",
"defaultstr",
"=",
"self",
".",
"_val_to_str",
"(",
"default",
")",
"self",
".",
"setData",
"(",
"index",
",",
"defaultstr",
")",
"except",
"KeyError",
":",
"raise",
"ConfigError",
"(",
"\"Missing Default Value in spec: \\\"%s\\\"\"",
"%",
"spec",
")"
] |
Set the value of the given index row to its default
:param index:
:type index:
:returns:
:rtype:
:raises:
|
[
"Set",
"the",
"value",
"of",
"the",
"given",
"index",
"row",
"to",
"its",
"default"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L135-L152
|
238,581
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
ConfigObjModel.get_value
|
def get_value(self, index):
""" Return the value of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The key is used on the section to return the value
:param index: The QModelIndex
:type index: QModelIndex
:returns: The value for the given index
"""
p = index.internalPointer()
k = self.get_key(p, index.row())
return p[k]
|
python
|
def get_value(self, index):
""" Return the value of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The key is used on the section to return the value
:param index: The QModelIndex
:type index: QModelIndex
:returns: The value for the given index
"""
p = index.internalPointer()
k = self.get_key(p, index.row())
return p[k]
|
[
"def",
"get_value",
"(",
"self",
",",
"index",
")",
":",
"p",
"=",
"index",
".",
"internalPointer",
"(",
")",
"k",
"=",
"self",
".",
"get_key",
"(",
"p",
",",
"index",
".",
"row",
"(",
")",
")",
"return",
"p",
"[",
"k",
"]"
] |
Return the value of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The key is used on the section to return the value
:param index: The QModelIndex
:type index: QModelIndex
:returns: The value for the given index
|
[
"Return",
"the",
"value",
"of",
"the",
"given",
"index"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L211-L224
|
238,582
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
ConfigObjModel.get_configspec_str
|
def get_configspec_str(self, index):
""" Return the config spec string of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The section stores the spec in its configspec attribute
The key is used on the configspec attribute to return the spec
:param index: The QModelIndex
:type index: QModelIndex
:returns: The spec for the given index or None
"""
p = index.internalPointer()
if p is None:
return
spec = p.configspec
if spec is None:
return None
k = self.get_key(p, index.row())
try:
return spec[k]
except KeyError:
return None
|
python
|
def get_configspec_str(self, index):
""" Return the config spec string of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The section stores the spec in its configspec attribute
The key is used on the configspec attribute to return the spec
:param index: The QModelIndex
:type index: QModelIndex
:returns: The spec for the given index or None
"""
p = index.internalPointer()
if p is None:
return
spec = p.configspec
if spec is None:
return None
k = self.get_key(p, index.row())
try:
return spec[k]
except KeyError:
return None
|
[
"def",
"get_configspec_str",
"(",
"self",
",",
"index",
")",
":",
"p",
"=",
"index",
".",
"internalPointer",
"(",
")",
"if",
"p",
"is",
"None",
":",
"return",
"spec",
"=",
"p",
".",
"configspec",
"if",
"spec",
"is",
"None",
":",
"return",
"None",
"k",
"=",
"self",
".",
"get_key",
"(",
"p",
",",
"index",
".",
"row",
"(",
")",
")",
"try",
":",
"return",
"spec",
"[",
"k",
"]",
"except",
"KeyError",
":",
"return",
"None"
] |
Return the config spec string of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The section stores the spec in its configspec attribute
The key is used on the configspec attribute to return the spec
:param index: The QModelIndex
:type index: QModelIndex
:returns: The spec for the given index or None
|
[
"Return",
"the",
"config",
"spec",
"string",
"of",
"the",
"given",
"index"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L226-L248
|
238,583
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
ConfigObjModel._val_to_str
|
def _val_to_str(self, value):
"""Converts the value to a string that will be handled correctly by the confobj
:param value: the value to parse
:type value: something configobj supports
:returns: str
:rtype: str
:raises: None
When the value is a list, it will be converted to a string that can be parsed to
the same list again.
"""
# might be a list value
# then represent it 'nicer' so that when we edit it, the same value will return
if isinstance(value, list):
# so we have a list value. the default str(v) would produce something like: ['a', 'b']
# handling such a value is not possible. it should be: 'a', 'b'
# so we have to convert it to a string but we have to make sure, we do not loose quotes
# even when values are integers, they get quoted. thats alright. the config obj will parse them correctly
return ', '.join("'%s'" % str(i) for i in value)
return str(value)
|
python
|
def _val_to_str(self, value):
"""Converts the value to a string that will be handled correctly by the confobj
:param value: the value to parse
:type value: something configobj supports
:returns: str
:rtype: str
:raises: None
When the value is a list, it will be converted to a string that can be parsed to
the same list again.
"""
# might be a list value
# then represent it 'nicer' so that when we edit it, the same value will return
if isinstance(value, list):
# so we have a list value. the default str(v) would produce something like: ['a', 'b']
# handling such a value is not possible. it should be: 'a', 'b'
# so we have to convert it to a string but we have to make sure, we do not loose quotes
# even when values are integers, they get quoted. thats alright. the config obj will parse them correctly
return ', '.join("'%s'" % str(i) for i in value)
return str(value)
|
[
"def",
"_val_to_str",
"(",
"self",
",",
"value",
")",
":",
"# might be a list value",
"# then represent it 'nicer' so that when we edit it, the same value will return",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"# so we have a list value. the default str(v) would produce something like: ['a', 'b']",
"# handling such a value is not possible. it should be: 'a', 'b'",
"# so we have to convert it to a string but we have to make sure, we do not loose quotes",
"# even when values are integers, they get quoted. thats alright. the config obj will parse them correctly",
"return",
"', '",
".",
"join",
"(",
"\"'%s'\"",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"value",
")",
"return",
"str",
"(",
"value",
")"
] |
Converts the value to a string that will be handled correctly by the confobj
:param value: the value to parse
:type value: something configobj supports
:returns: str
:rtype: str
:raises: None
When the value is a list, it will be converted to a string that can be parsed to
the same list again.
|
[
"Converts",
"the",
"value",
"to",
"a",
"string",
"that",
"will",
"be",
"handled",
"correctly",
"by",
"the",
"confobj"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L292-L312
|
238,584
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
InifilesModel.set_index_edited
|
def set_index_edited(self, index, edited):
"""Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
"""
self.__edited[index.row()] = edited
self.dataChanged.emit(index, index)
|
python
|
def set_index_edited(self, index, edited):
"""Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
"""
self.__edited[index.row()] = edited
self.dataChanged.emit(index, index)
|
[
"def",
"set_index_edited",
"(",
"self",
",",
"index",
",",
"edited",
")",
":",
"self",
".",
"__edited",
"[",
"index",
".",
"row",
"(",
")",
"]",
"=",
"edited",
"self",
".",
"dataChanged",
".",
"emit",
"(",
"index",
",",
"index",
")"
] |
Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"whether",
"the",
"conf",
"was",
"edited",
"or",
"not",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L390-L404
|
238,585
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
InifilesModel.get_edited
|
def get_edited(self, ):
"""Return all indices that were modified
:returns: list of indices for modified confs
:rtype: list of QModelIndex
:raises: None
"""
modified = []
for i in range(len(self.__edited)):
if self.__edited[i]:
modified.append(self.__configs[i])
return modified
|
python
|
def get_edited(self, ):
"""Return all indices that were modified
:returns: list of indices for modified confs
:rtype: list of QModelIndex
:raises: None
"""
modified = []
for i in range(len(self.__edited)):
if self.__edited[i]:
modified.append(self.__configs[i])
return modified
|
[
"def",
"get_edited",
"(",
"self",
",",
")",
":",
"modified",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"__edited",
")",
")",
":",
"if",
"self",
".",
"__edited",
"[",
"i",
"]",
":",
"modified",
".",
"append",
"(",
"self",
".",
"__configs",
"[",
"i",
"]",
")",
"return",
"modified"
] |
Return all indices that were modified
:returns: list of indices for modified confs
:rtype: list of QModelIndex
:raises: None
|
[
"Return",
"all",
"indices",
"that",
"were",
"modified"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L406-L417
|
238,586
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/configeditor.py
|
InifilesModel.validate
|
def validate(self, index):
"""Validate the conf for the given index
:param index: the index of the model to validate
:type index: QModelIndex
:returns: True if passed and a False/True dict representing fail/pass. The structure follows the configobj. If the configobj does not have a configspec True is returned.
:rtype: True|Dict
:raises: None
"""
c = self.__configs[index.row()]
if c.configspec is None:
return True
else:
return c.validate(self.vld)
|
python
|
def validate(self, index):
"""Validate the conf for the given index
:param index: the index of the model to validate
:type index: QModelIndex
:returns: True if passed and a False/True dict representing fail/pass. The structure follows the configobj. If the configobj does not have a configspec True is returned.
:rtype: True|Dict
:raises: None
"""
c = self.__configs[index.row()]
if c.configspec is None:
return True
else:
return c.validate(self.vld)
|
[
"def",
"validate",
"(",
"self",
",",
"index",
")",
":",
"c",
"=",
"self",
".",
"__configs",
"[",
"index",
".",
"row",
"(",
")",
"]",
"if",
"c",
".",
"configspec",
"is",
"None",
":",
"return",
"True",
"else",
":",
"return",
"c",
".",
"validate",
"(",
"self",
".",
"vld",
")"
] |
Validate the conf for the given index
:param index: the index of the model to validate
:type index: QModelIndex
:returns: True if passed and a False/True dict representing fail/pass. The structure follows the configobj. If the configobj does not have a configspec True is returned.
:rtype: True|Dict
:raises: None
|
[
"Validate",
"the",
"conf",
"for",
"the",
"given",
"index"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/configeditor.py#L419-L432
|
238,587
|
ubernostrum/django-flashpolicies
|
flashpolicies/views.py
|
metapolicy
|
def metapolicy(request, permitted, domains=None):
"""
Serves a cross-domain policy which can allow other policies
to exist on the same domain.
Note that this view, if used, must be the master policy for the
domain, and so must be served from the URL ``/crossdomain.xml`` on
the domain: setting metapolicy information in other policy files
is forbidden by the cross-domain policy specification.
**Required arguments:**
``permitted``
A string indicating the extent to which other policies are
permitted. A set of constants is available in
``flashpolicies.policies``, defining acceptable values for
this argument.
**Optional arguments:**
``domains``
A list of domains from which to allow access. Each value may
be either a domain name (e.g., ``example.com``) or a wildcard
(e.g., ``*.example.com``). Due to serious potential security
issues, it is strongly recommended that you not use wildcard
domain values.
"""
if domains is None:
domains = []
policy = policies.Policy(*domains)
policy.metapolicy(permitted)
return serve(request, policy)
|
python
|
def metapolicy(request, permitted, domains=None):
"""
Serves a cross-domain policy which can allow other policies
to exist on the same domain.
Note that this view, if used, must be the master policy for the
domain, and so must be served from the URL ``/crossdomain.xml`` on
the domain: setting metapolicy information in other policy files
is forbidden by the cross-domain policy specification.
**Required arguments:**
``permitted``
A string indicating the extent to which other policies are
permitted. A set of constants is available in
``flashpolicies.policies``, defining acceptable values for
this argument.
**Optional arguments:**
``domains``
A list of domains from which to allow access. Each value may
be either a domain name (e.g., ``example.com``) or a wildcard
(e.g., ``*.example.com``). Due to serious potential security
issues, it is strongly recommended that you not use wildcard
domain values.
"""
if domains is None:
domains = []
policy = policies.Policy(*domains)
policy.metapolicy(permitted)
return serve(request, policy)
|
[
"def",
"metapolicy",
"(",
"request",
",",
"permitted",
",",
"domains",
"=",
"None",
")",
":",
"if",
"domains",
"is",
"None",
":",
"domains",
"=",
"[",
"]",
"policy",
"=",
"policies",
".",
"Policy",
"(",
"*",
"domains",
")",
"policy",
".",
"metapolicy",
"(",
"permitted",
")",
"return",
"serve",
"(",
"request",
",",
"policy",
")"
] |
Serves a cross-domain policy which can allow other policies
to exist on the same domain.
Note that this view, if used, must be the master policy for the
domain, and so must be served from the URL ``/crossdomain.xml`` on
the domain: setting metapolicy information in other policy files
is forbidden by the cross-domain policy specification.
**Required arguments:**
``permitted``
A string indicating the extent to which other policies are
permitted. A set of constants is available in
``flashpolicies.policies``, defining acceptable values for
this argument.
**Optional arguments:**
``domains``
A list of domains from which to allow access. Each value may
be either a domain name (e.g., ``example.com``) or a wildcard
(e.g., ``*.example.com``). Due to serious potential security
issues, it is strongly recommended that you not use wildcard
domain values.
|
[
"Serves",
"a",
"cross",
"-",
"domain",
"policy",
"which",
"can",
"allow",
"other",
"policies",
"to",
"exist",
"on",
"the",
"same",
"domain",
"."
] |
fb04693504186dde859cce97bad6e83d2b380dc6
|
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/views.py#L78-L110
|
238,588
|
RealGeeks/batman
|
batman/run.py
|
_run_popen
|
def _run_popen(command, print_output=False):
"""
subprocess has the most terrible interface ever.
Envoy is an option but too heavyweight for this.
This is a convenience wrapper around subprocess.Popen.
Also, this merges STDOUT and STDERR together, since
there isn't a good way of interleaving them without
threads.
"""
output = ''
po = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
fcntl.fcntl(
po.stdout.fileno(),
fcntl.F_SETFL,
fcntl.fcntl(po.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK,
)
while po.poll() is None:
stream = po.stdout
readx = select.select([stream.fileno()], [], [])[0]
if readx:
chunk = stream.read()
output += chunk
if print_output:
print chunk
return Result(output, po.returncode)
|
python
|
def _run_popen(command, print_output=False):
"""
subprocess has the most terrible interface ever.
Envoy is an option but too heavyweight for this.
This is a convenience wrapper around subprocess.Popen.
Also, this merges STDOUT and STDERR together, since
there isn't a good way of interleaving them without
threads.
"""
output = ''
po = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
fcntl.fcntl(
po.stdout.fileno(),
fcntl.F_SETFL,
fcntl.fcntl(po.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK,
)
while po.poll() is None:
stream = po.stdout
readx = select.select([stream.fileno()], [], [])[0]
if readx:
chunk = stream.read()
output += chunk
if print_output:
print chunk
return Result(output, po.returncode)
|
[
"def",
"_run_popen",
"(",
"command",
",",
"print_output",
"=",
"False",
")",
":",
"output",
"=",
"''",
"po",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
")",
"fcntl",
".",
"fcntl",
"(",
"po",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"fcntl",
".",
"F_SETFL",
",",
"fcntl",
".",
"fcntl",
"(",
"po",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"fcntl",
".",
"F_GETFL",
")",
"|",
"os",
".",
"O_NONBLOCK",
",",
")",
"while",
"po",
".",
"poll",
"(",
")",
"is",
"None",
":",
"stream",
"=",
"po",
".",
"stdout",
"readx",
"=",
"select",
".",
"select",
"(",
"[",
"stream",
".",
"fileno",
"(",
")",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"[",
"0",
"]",
"if",
"readx",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
")",
"output",
"+=",
"chunk",
"if",
"print_output",
":",
"print",
"chunk",
"return",
"Result",
"(",
"output",
",",
"po",
".",
"returncode",
")"
] |
subprocess has the most terrible interface ever.
Envoy is an option but too heavyweight for this.
This is a convenience wrapper around subprocess.Popen.
Also, this merges STDOUT and STDERR together, since
there isn't a good way of interleaving them without
threads.
|
[
"subprocess",
"has",
"the",
"most",
"terrible",
"interface",
"ever",
".",
"Envoy",
"is",
"an",
"option",
"but",
"too",
"heavyweight",
"for",
"this",
".",
"This",
"is",
"a",
"convenience",
"wrapper",
"around",
"subprocess",
".",
"Popen",
"."
] |
ac61d193cbc6cc736f61ae8cf5e933a576b50698
|
https://github.com/RealGeeks/batman/blob/ac61d193cbc6cc736f61ae8cf5e933a576b50698/batman/run.py#L19-L48
|
238,589
|
emilydolson/avida-spatial-tools
|
avidaspatial/patch_analysis.py
|
perimeter
|
def perimeter(patch, world_size=(60, 60),
neighbor_func=get_rook_neighbors_toroidal):
"""
Count cell faces in patch that do not connect to part of patch.
This preserves various square geometry features that would not
be preserved by merely counting the number of cells that touch
an edge.
"""
edge = 0
patch = set([tuple(i) for i in patch])
for cell in patch:
neighbors = neighbor_func(cell, world_size)
neighbors = [n for n in neighbors if n not in patch]
edge += len(neighbors)
return edge
|
python
|
def perimeter(patch, world_size=(60, 60),
neighbor_func=get_rook_neighbors_toroidal):
"""
Count cell faces in patch that do not connect to part of patch.
This preserves various square geometry features that would not
be preserved by merely counting the number of cells that touch
an edge.
"""
edge = 0
patch = set([tuple(i) for i in patch])
for cell in patch:
neighbors = neighbor_func(cell, world_size)
neighbors = [n for n in neighbors if n not in patch]
edge += len(neighbors)
return edge
|
[
"def",
"perimeter",
"(",
"patch",
",",
"world_size",
"=",
"(",
"60",
",",
"60",
")",
",",
"neighbor_func",
"=",
"get_rook_neighbors_toroidal",
")",
":",
"edge",
"=",
"0",
"patch",
"=",
"set",
"(",
"[",
"tuple",
"(",
"i",
")",
"for",
"i",
"in",
"patch",
"]",
")",
"for",
"cell",
"in",
"patch",
":",
"neighbors",
"=",
"neighbor_func",
"(",
"cell",
",",
"world_size",
")",
"neighbors",
"=",
"[",
"n",
"for",
"n",
"in",
"neighbors",
"if",
"n",
"not",
"in",
"patch",
"]",
"edge",
"+=",
"len",
"(",
"neighbors",
")",
"return",
"edge"
] |
Count cell faces in patch that do not connect to part of patch.
This preserves various square geometry features that would not
be preserved by merely counting the number of cells that touch
an edge.
|
[
"Count",
"cell",
"faces",
"in",
"patch",
"that",
"do",
"not",
"connect",
"to",
"part",
"of",
"patch",
".",
"This",
"preserves",
"various",
"square",
"geometry",
"features",
"that",
"would",
"not",
"be",
"preserved",
"by",
"merely",
"counting",
"the",
"number",
"of",
"cells",
"that",
"touch",
"an",
"edge",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/patch_analysis.py#L89-L104
|
238,590
|
emilydolson/avida-spatial-tools
|
avidaspatial/patch_analysis.py
|
traverse_core
|
def traverse_core(core_area, world_size=(60, 60),
neighbor_func=get_moore_neighbors_toroidal):
"""
Treat cells in core_area like a graph and traverse it to
see how many connected components there are.
"""
if not core_area:
return []
core_area = [tuple(i) for i in core_area]
curr = core_area[0]
core_area = set(core_area[1:])
to_explore = []
cores = [[curr]]
while core_area:
neighbors = neighbor_func(curr, world_size)
for n in neighbors:
if n in core_area:
core_area.remove(n)
to_explore.append(n)
cores[-1].append(n)
if to_explore:
curr = to_explore.pop()
else:
curr = core_area.pop()
cores.append([curr])
return cores
|
python
|
def traverse_core(core_area, world_size=(60, 60),
neighbor_func=get_moore_neighbors_toroidal):
"""
Treat cells in core_area like a graph and traverse it to
see how many connected components there are.
"""
if not core_area:
return []
core_area = [tuple(i) for i in core_area]
curr = core_area[0]
core_area = set(core_area[1:])
to_explore = []
cores = [[curr]]
while core_area:
neighbors = neighbor_func(curr, world_size)
for n in neighbors:
if n in core_area:
core_area.remove(n)
to_explore.append(n)
cores[-1].append(n)
if to_explore:
curr = to_explore.pop()
else:
curr = core_area.pop()
cores.append([curr])
return cores
|
[
"def",
"traverse_core",
"(",
"core_area",
",",
"world_size",
"=",
"(",
"60",
",",
"60",
")",
",",
"neighbor_func",
"=",
"get_moore_neighbors_toroidal",
")",
":",
"if",
"not",
"core_area",
":",
"return",
"[",
"]",
"core_area",
"=",
"[",
"tuple",
"(",
"i",
")",
"for",
"i",
"in",
"core_area",
"]",
"curr",
"=",
"core_area",
"[",
"0",
"]",
"core_area",
"=",
"set",
"(",
"core_area",
"[",
"1",
":",
"]",
")",
"to_explore",
"=",
"[",
"]",
"cores",
"=",
"[",
"[",
"curr",
"]",
"]",
"while",
"core_area",
":",
"neighbors",
"=",
"neighbor_func",
"(",
"curr",
",",
"world_size",
")",
"for",
"n",
"in",
"neighbors",
":",
"if",
"n",
"in",
"core_area",
":",
"core_area",
".",
"remove",
"(",
"n",
")",
"to_explore",
".",
"append",
"(",
"n",
")",
"cores",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"n",
")",
"if",
"to_explore",
":",
"curr",
"=",
"to_explore",
".",
"pop",
"(",
")",
"else",
":",
"curr",
"=",
"core_area",
".",
"pop",
"(",
")",
"cores",
".",
"append",
"(",
"[",
"curr",
"]",
")",
"return",
"cores"
] |
Treat cells in core_area like a graph and traverse it to
see how many connected components there are.
|
[
"Treat",
"cells",
"in",
"core_area",
"like",
"a",
"graph",
"and",
"traverse",
"it",
"to",
"see",
"how",
"many",
"connected",
"components",
"there",
"are",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/patch_analysis.py#L376-L406
|
238,591
|
samisalkosuo/pipapp
|
pipapp/pipapp.py
|
parse_command_line_args
|
def parse_command_line_args():
"""parse command line args"""
parser = argparse.ArgumentParser(description='PipApp. {}'.format(DESCRIPTION))
parser.add_argument(
'-d', '--dir',
metavar='DIR',
help='Root directory where to create new project files and dirs. Default is current directory.'
)
parser.add_argument(
'-v,', '--version',
action='version',
version='{} v{}'.format(PROGRAMNAME, VERSION)
)
parser.add_argument(
"project_name",
metavar='PROJECTNAME',
help="Name of the generated Project. Has to be a valid Python identifier."
)
return parser.parse_args()
|
python
|
def parse_command_line_args():
"""parse command line args"""
parser = argparse.ArgumentParser(description='PipApp. {}'.format(DESCRIPTION))
parser.add_argument(
'-d', '--dir',
metavar='DIR',
help='Root directory where to create new project files and dirs. Default is current directory.'
)
parser.add_argument(
'-v,', '--version',
action='version',
version='{} v{}'.format(PROGRAMNAME, VERSION)
)
parser.add_argument(
"project_name",
metavar='PROJECTNAME',
help="Name of the generated Project. Has to be a valid Python identifier."
)
return parser.parse_args()
|
[
"def",
"parse_command_line_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'PipApp. {}'",
".",
"format",
"(",
"DESCRIPTION",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--dir'",
",",
"metavar",
"=",
"'DIR'",
",",
"help",
"=",
"'Root directory where to create new project files and dirs. Default is current directory.'",
")",
"parser",
".",
"add_argument",
"(",
"'-v,'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'{} v{}'",
".",
"format",
"(",
"PROGRAMNAME",
",",
"VERSION",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"project_name\"",
",",
"metavar",
"=",
"'PROJECTNAME'",
",",
"help",
"=",
"\"Name of the generated Project. Has to be a valid Python identifier.\"",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] |
parse command line args
|
[
"parse",
"command",
"line",
"args"
] |
cd39e664c7c3d4d50d821ebf94f9f7836eefccb4
|
https://github.com/samisalkosuo/pipapp/blob/cd39e664c7c3d4d50d821ebf94f9f7836eefccb4/pipapp/pipapp.py#L44-L62
|
238,592
|
realestate-com-au/dashmat
|
dashmat/actions.py
|
requirements
|
def requirements(collector):
"""Just print out the requirements"""
out = sys.stdout
artifact = collector.configuration['dashmat'].artifact
if artifact not in (None, "", NotSpecified):
if isinstance(artifact, six.string_types):
out = open(artifact, 'w')
else:
out = artifact
for active in collector.configuration['__imported__'].values():
for requirement in active.requirements():
out.write("{0}\n".format(requirement))
|
python
|
def requirements(collector):
"""Just print out the requirements"""
out = sys.stdout
artifact = collector.configuration['dashmat'].artifact
if artifact not in (None, "", NotSpecified):
if isinstance(artifact, six.string_types):
out = open(artifact, 'w')
else:
out = artifact
for active in collector.configuration['__imported__'].values():
for requirement in active.requirements():
out.write("{0}\n".format(requirement))
|
[
"def",
"requirements",
"(",
"collector",
")",
":",
"out",
"=",
"sys",
".",
"stdout",
"artifact",
"=",
"collector",
".",
"configuration",
"[",
"'dashmat'",
"]",
".",
"artifact",
"if",
"artifact",
"not",
"in",
"(",
"None",
",",
"\"\"",
",",
"NotSpecified",
")",
":",
"if",
"isinstance",
"(",
"artifact",
",",
"six",
".",
"string_types",
")",
":",
"out",
"=",
"open",
"(",
"artifact",
",",
"'w'",
")",
"else",
":",
"out",
"=",
"artifact",
"for",
"active",
"in",
"collector",
".",
"configuration",
"[",
"'__imported__'",
"]",
".",
"values",
"(",
")",
":",
"for",
"requirement",
"in",
"active",
".",
"requirements",
"(",
")",
":",
"out",
".",
"write",
"(",
"\"{0}\\n\"",
".",
"format",
"(",
"requirement",
")",
")"
] |
Just print out the requirements
|
[
"Just",
"print",
"out",
"the",
"requirements"
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/actions.py#L68-L80
|
238,593
|
realestate-com-au/dashmat
|
dashmat/actions.py
|
run_checks
|
def run_checks(collector):
"""Just run the checks for our modules"""
artifact = collector.configuration["dashmat"].artifact
chosen = artifact
if chosen in (None, "", NotSpecified):
chosen = None
dashmat = collector.configuration["dashmat"]
modules = collector.configuration["__active_modules__"]
config_root = collector.configuration["config_root"]
module_options = collector.configuration["modules"]
datastore = JsonDataStore(os.path.join(config_root, "data.json"))
if dashmat.redis_host:
datastore = RedisDataStore(redis.Redis(dashmat.redis_host))
scheduler = Scheduler(datastore)
for name, module in modules.items():
if chosen is None or name == chosen:
server = module.make_server(module_options[name].server_options)
scheduler.register(module, server, name)
scheduler.twitch(force=True)
|
python
|
def run_checks(collector):
"""Just run the checks for our modules"""
artifact = collector.configuration["dashmat"].artifact
chosen = artifact
if chosen in (None, "", NotSpecified):
chosen = None
dashmat = collector.configuration["dashmat"]
modules = collector.configuration["__active_modules__"]
config_root = collector.configuration["config_root"]
module_options = collector.configuration["modules"]
datastore = JsonDataStore(os.path.join(config_root, "data.json"))
if dashmat.redis_host:
datastore = RedisDataStore(redis.Redis(dashmat.redis_host))
scheduler = Scheduler(datastore)
for name, module in modules.items():
if chosen is None or name == chosen:
server = module.make_server(module_options[name].server_options)
scheduler.register(module, server, name)
scheduler.twitch(force=True)
|
[
"def",
"run_checks",
"(",
"collector",
")",
":",
"artifact",
"=",
"collector",
".",
"configuration",
"[",
"\"dashmat\"",
"]",
".",
"artifact",
"chosen",
"=",
"artifact",
"if",
"chosen",
"in",
"(",
"None",
",",
"\"\"",
",",
"NotSpecified",
")",
":",
"chosen",
"=",
"None",
"dashmat",
"=",
"collector",
".",
"configuration",
"[",
"\"dashmat\"",
"]",
"modules",
"=",
"collector",
".",
"configuration",
"[",
"\"__active_modules__\"",
"]",
"config_root",
"=",
"collector",
".",
"configuration",
"[",
"\"config_root\"",
"]",
"module_options",
"=",
"collector",
".",
"configuration",
"[",
"\"modules\"",
"]",
"datastore",
"=",
"JsonDataStore",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config_root",
",",
"\"data.json\"",
")",
")",
"if",
"dashmat",
".",
"redis_host",
":",
"datastore",
"=",
"RedisDataStore",
"(",
"redis",
".",
"Redis",
"(",
"dashmat",
".",
"redis_host",
")",
")",
"scheduler",
"=",
"Scheduler",
"(",
"datastore",
")",
"for",
"name",
",",
"module",
"in",
"modules",
".",
"items",
"(",
")",
":",
"if",
"chosen",
"is",
"None",
"or",
"name",
"==",
"chosen",
":",
"server",
"=",
"module",
".",
"make_server",
"(",
"module_options",
"[",
"name",
"]",
".",
"server_options",
")",
"scheduler",
".",
"register",
"(",
"module",
",",
"server",
",",
"name",
")",
"scheduler",
".",
"twitch",
"(",
"force",
"=",
"True",
")"
] |
Just run the checks for our modules
|
[
"Just",
"run",
"the",
"checks",
"for",
"our",
"modules"
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/actions.py#L83-L106
|
238,594
|
realestate-com-au/dashmat
|
dashmat/actions.py
|
list_npm_modules
|
def list_npm_modules(collector, no_print=False):
"""List the npm modules that get installed in a docker image for the react server"""
default = ReactServer().default_npm_deps()
for _, module in sorted(collector.configuration["__active_modules__"].items()):
default.update(module.npm_deps())
if not no_print:
print(json.dumps(default, indent=4, sort_keys=True))
return default
|
python
|
def list_npm_modules(collector, no_print=False):
"""List the npm modules that get installed in a docker image for the react server"""
default = ReactServer().default_npm_deps()
for _, module in sorted(collector.configuration["__active_modules__"].items()):
default.update(module.npm_deps())
if not no_print:
print(json.dumps(default, indent=4, sort_keys=True))
return default
|
[
"def",
"list_npm_modules",
"(",
"collector",
",",
"no_print",
"=",
"False",
")",
":",
"default",
"=",
"ReactServer",
"(",
")",
".",
"default_npm_deps",
"(",
")",
"for",
"_",
",",
"module",
"in",
"sorted",
"(",
"collector",
".",
"configuration",
"[",
"\"__active_modules__\"",
"]",
".",
"items",
"(",
")",
")",
":",
"default",
".",
"update",
"(",
"module",
".",
"npm_deps",
"(",
")",
")",
"if",
"not",
"no_print",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"default",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
")",
"return",
"default"
] |
List the npm modules that get installed in a docker image for the react server
|
[
"List",
"the",
"npm",
"modules",
"that",
"get",
"installed",
"in",
"a",
"docker",
"image",
"for",
"the",
"react",
"server"
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/actions.py#L109-L117
|
238,595
|
realestate-com-au/dashmat
|
dashmat/actions.py
|
collect_dashboard_js
|
def collect_dashboard_js(collector):
"""Generate dashboard javascript for each dashboard"""
dashmat = collector.configuration["dashmat"]
modules = collector.configuration["__active_modules__"]
compiled_static_prep = dashmat.compiled_static_prep
compiled_static_folder = dashmat.compiled_static_folder
npm_deps = list_npm_modules(collector, no_print=True)
react_server = ReactServer()
react_server.prepare(npm_deps, compiled_static_folder)
for dashboard in collector.configuration["dashboards"].values():
log.info("Generating compiled javascript for dashboard:{0}".format(dashboard.path))
filename = dashboard.path.replace("_", "__").replace("/", "_")
location = os.path.join(compiled_static_folder, "dashboards", "{0}.js".format(filename))
if os.path.exists(location):
os.remove(location)
generate_dashboard_js(dashboard, react_server, compiled_static_folder, compiled_static_prep, modules)
|
python
|
def collect_dashboard_js(collector):
"""Generate dashboard javascript for each dashboard"""
dashmat = collector.configuration["dashmat"]
modules = collector.configuration["__active_modules__"]
compiled_static_prep = dashmat.compiled_static_prep
compiled_static_folder = dashmat.compiled_static_folder
npm_deps = list_npm_modules(collector, no_print=True)
react_server = ReactServer()
react_server.prepare(npm_deps, compiled_static_folder)
for dashboard in collector.configuration["dashboards"].values():
log.info("Generating compiled javascript for dashboard:{0}".format(dashboard.path))
filename = dashboard.path.replace("_", "__").replace("/", "_")
location = os.path.join(compiled_static_folder, "dashboards", "{0}.js".format(filename))
if os.path.exists(location):
os.remove(location)
generate_dashboard_js(dashboard, react_server, compiled_static_folder, compiled_static_prep, modules)
|
[
"def",
"collect_dashboard_js",
"(",
"collector",
")",
":",
"dashmat",
"=",
"collector",
".",
"configuration",
"[",
"\"dashmat\"",
"]",
"modules",
"=",
"collector",
".",
"configuration",
"[",
"\"__active_modules__\"",
"]",
"compiled_static_prep",
"=",
"dashmat",
".",
"compiled_static_prep",
"compiled_static_folder",
"=",
"dashmat",
".",
"compiled_static_folder",
"npm_deps",
"=",
"list_npm_modules",
"(",
"collector",
",",
"no_print",
"=",
"True",
")",
"react_server",
"=",
"ReactServer",
"(",
")",
"react_server",
".",
"prepare",
"(",
"npm_deps",
",",
"compiled_static_folder",
")",
"for",
"dashboard",
"in",
"collector",
".",
"configuration",
"[",
"\"dashboards\"",
"]",
".",
"values",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"Generating compiled javascript for dashboard:{0}\"",
".",
"format",
"(",
"dashboard",
".",
"path",
")",
")",
"filename",
"=",
"dashboard",
".",
"path",
".",
"replace",
"(",
"\"_\"",
",",
"\"__\"",
")",
".",
"replace",
"(",
"\"/\"",
",",
"\"_\"",
")",
"location",
"=",
"os",
".",
"path",
".",
"join",
"(",
"compiled_static_folder",
",",
"\"dashboards\"",
",",
"\"{0}.js\"",
".",
"format",
"(",
"filename",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"location",
")",
":",
"os",
".",
"remove",
"(",
"location",
")",
"generate_dashboard_js",
"(",
"dashboard",
",",
"react_server",
",",
"compiled_static_folder",
",",
"compiled_static_prep",
",",
"modules",
")"
] |
Generate dashboard javascript for each dashboard
|
[
"Generate",
"dashboard",
"javascript",
"for",
"each",
"dashboard"
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/actions.py#L120-L138
|
238,596
|
koriakin/binflakes
|
binflakes/sexpr/read.py
|
read_file
|
def read_file(file, filename='<input>'):
"""This is a generator that yields all top-level S-expression nodes from
a given file object."""
reader = Reader(filename)
for line in file:
yield from reader.feed_line(line)
reader.finish()
|
python
|
def read_file(file, filename='<input>'):
"""This is a generator that yields all top-level S-expression nodes from
a given file object."""
reader = Reader(filename)
for line in file:
yield from reader.feed_line(line)
reader.finish()
|
[
"def",
"read_file",
"(",
"file",
",",
"filename",
"=",
"'<input>'",
")",
":",
"reader",
"=",
"Reader",
"(",
"filename",
")",
"for",
"line",
"in",
"file",
":",
"yield",
"from",
"reader",
".",
"feed_line",
"(",
"line",
")",
"reader",
".",
"finish",
"(",
")"
] |
This is a generator that yields all top-level S-expression nodes from
a given file object.
|
[
"This",
"is",
"a",
"generator",
"that",
"yields",
"all",
"top",
"-",
"level",
"S",
"-",
"expression",
"nodes",
"from",
"a",
"given",
"file",
"object",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/sexpr/read.py#L340-L346
|
238,597
|
koriakin/binflakes
|
binflakes/sexpr/read.py
|
Reader._feed_node
|
def _feed_node(self, value, loc):
"""A helper method called when an S-expression has been recognized.
Like feed_line, this is a generator that yields newly recognized
top-level expressions. If the reader is currently at the top level,
simply yields the passed expression. Otherwise, it appends it
to whatever is currently being parsed and yields nothing.
"""
node = GenericNode(value, loc)
if not self.stack:
yield node
else:
top = self.stack[-1]
if isinstance(top, StackEntryList):
top.items.append(node)
elif isinstance(top, StackEntryComment):
self.stack.pop()
else:
assert 0
|
python
|
def _feed_node(self, value, loc):
"""A helper method called when an S-expression has been recognized.
Like feed_line, this is a generator that yields newly recognized
top-level expressions. If the reader is currently at the top level,
simply yields the passed expression. Otherwise, it appends it
to whatever is currently being parsed and yields nothing.
"""
node = GenericNode(value, loc)
if not self.stack:
yield node
else:
top = self.stack[-1]
if isinstance(top, StackEntryList):
top.items.append(node)
elif isinstance(top, StackEntryComment):
self.stack.pop()
else:
assert 0
|
[
"def",
"_feed_node",
"(",
"self",
",",
"value",
",",
"loc",
")",
":",
"node",
"=",
"GenericNode",
"(",
"value",
",",
"loc",
")",
"if",
"not",
"self",
".",
"stack",
":",
"yield",
"node",
"else",
":",
"top",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"top",
",",
"StackEntryList",
")",
":",
"top",
".",
"items",
".",
"append",
"(",
"node",
")",
"elif",
"isinstance",
"(",
"top",
",",
"StackEntryComment",
")",
":",
"self",
".",
"stack",
".",
"pop",
"(",
")",
"else",
":",
"assert",
"0"
] |
A helper method called when an S-expression has been recognized.
Like feed_line, this is a generator that yields newly recognized
top-level expressions. If the reader is currently at the top level,
simply yields the passed expression. Otherwise, it appends it
to whatever is currently being parsed and yields nothing.
|
[
"A",
"helper",
"method",
"called",
"when",
"an",
"S",
"-",
"expression",
"has",
"been",
"recognized",
".",
"Like",
"feed_line",
"this",
"is",
"a",
"generator",
"that",
"yields",
"newly",
"recognized",
"top",
"-",
"level",
"expressions",
".",
"If",
"the",
"reader",
"is",
"currently",
"at",
"the",
"top",
"level",
"simply",
"yields",
"the",
"passed",
"expression",
".",
"Otherwise",
"it",
"appends",
"it",
"to",
"whatever",
"is",
"currently",
"being",
"parsed",
"and",
"yields",
"nothing",
"."
] |
f059cecadf1c605802a713c62375b5bd5606d53f
|
https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/sexpr/read.py#L309-L326
|
238,598
|
pawelzny/context-loop
|
cl/loop.py
|
Loop.run_until_complete
|
def run_until_complete(self):
"""Run loop until all futures are done.
Schedule futures for execution and wait until all are done.
Return value from future, or list of values if multiple
futures had been passed to constructor or gather method.
All results will be in the same order as order of futures passed to constructor.
:Example:
.. code-block:: python
>>> async def slow():
... await ultra_slow_task()
... return 'ultra slow'
...
>>> async def fast():
... await the_fastest_task_on_earth()
...
>>> with Loop(slow(), fast()) as loop:
... result = loop.run_until_complete()
...
>>> result
['ultra slow', None]
:return: Value from future or list of values.
:rtype: None, list, Any
"""
try:
result = self.loop.run_until_complete(self.futures)
except asyncio.futures.CancelledError:
return None
else:
if self.ft_count == 1:
return result[0]
return result
|
python
|
def run_until_complete(self):
"""Run loop until all futures are done.
Schedule futures for execution and wait until all are done.
Return value from future, or list of values if multiple
futures had been passed to constructor or gather method.
All results will be in the same order as order of futures passed to constructor.
:Example:
.. code-block:: python
>>> async def slow():
... await ultra_slow_task()
... return 'ultra slow'
...
>>> async def fast():
... await the_fastest_task_on_earth()
...
>>> with Loop(slow(), fast()) as loop:
... result = loop.run_until_complete()
...
>>> result
['ultra slow', None]
:return: Value from future or list of values.
:rtype: None, list, Any
"""
try:
result = self.loop.run_until_complete(self.futures)
except asyncio.futures.CancelledError:
return None
else:
if self.ft_count == 1:
return result[0]
return result
|
[
"def",
"run_until_complete",
"(",
"self",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"futures",
")",
"except",
"asyncio",
".",
"futures",
".",
"CancelledError",
":",
"return",
"None",
"else",
":",
"if",
"self",
".",
"ft_count",
"==",
"1",
":",
"return",
"result",
"[",
"0",
"]",
"return",
"result"
] |
Run loop until all futures are done.
Schedule futures for execution and wait until all are done.
Return value from future, or list of values if multiple
futures had been passed to constructor or gather method.
All results will be in the same order as order of futures passed to constructor.
:Example:
.. code-block:: python
>>> async def slow():
... await ultra_slow_task()
... return 'ultra slow'
...
>>> async def fast():
... await the_fastest_task_on_earth()
...
>>> with Loop(slow(), fast()) as loop:
... result = loop.run_until_complete()
...
>>> result
['ultra slow', None]
:return: Value from future or list of values.
:rtype: None, list, Any
|
[
"Run",
"loop",
"until",
"all",
"futures",
"are",
"done",
"."
] |
2d3280bc4294c5e7d8590a09029c5aa2a04e8565
|
https://github.com/pawelzny/context-loop/blob/2d3280bc4294c5e7d8590a09029c5aa2a04e8565/cl/loop.py#L128-L165
|
238,599
|
aganezov/gos-asm
|
gos_asm/algo/shared/gos_asm_bg.py
|
get_irregular_vertex
|
def get_irregular_vertex(bgedge):
"""
This method is called only in irregular edges in current implementation, thus at least one edge will be irregular
"""
if not bgedge.is_irregular_edge:
raise Exception("trying to retrieve an irregular vertex from regular edge")
return bgedge.vertex1 if bgedge.vertex1.is_irregular_vertex else bgedge.vertex2
|
python
|
def get_irregular_vertex(bgedge):
"""
This method is called only in irregular edges in current implementation, thus at least one edge will be irregular
"""
if not bgedge.is_irregular_edge:
raise Exception("trying to retrieve an irregular vertex from regular edge")
return bgedge.vertex1 if bgedge.vertex1.is_irregular_vertex else bgedge.vertex2
|
[
"def",
"get_irregular_vertex",
"(",
"bgedge",
")",
":",
"if",
"not",
"bgedge",
".",
"is_irregular_edge",
":",
"raise",
"Exception",
"(",
"\"trying to retrieve an irregular vertex from regular edge\"",
")",
"return",
"bgedge",
".",
"vertex1",
"if",
"bgedge",
".",
"vertex1",
".",
"is_irregular_vertex",
"else",
"bgedge",
".",
"vertex2"
] |
This method is called only in irregular edges in current implementation, thus at least one edge will be irregular
|
[
"This",
"method",
"is",
"called",
"only",
"in",
"irregular",
"edges",
"in",
"current",
"implementation",
"thus",
"at",
"least",
"one",
"edge",
"will",
"be",
"irregular"
] |
7161d80344dc32db47221a0503a43e30433f1db0
|
https://github.com/aganezov/gos-asm/blob/7161d80344dc32db47221a0503a43e30433f1db0/gos_asm/algo/shared/gos_asm_bg.py#L17-L23
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.