| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
240,400
|
jayclassless/basicserial
|
src/basicserial/__init__.py
|
from_toml
|
def from_toml(value, native_datetimes=True):
"""
Deserializes the given value from TOML.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
"""
if not toml:
raise NotImplementedError('No supported TOML library available')
result = toml.loads(value)
if native_datetimes:
result = convert_datetimes(result)
return result
|
python
|
def from_toml(value, native_datetimes=True):
"""
Deserializes the given value from TOML.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
"""
if not toml:
raise NotImplementedError('No supported TOML library available')
result = toml.loads(value)
if native_datetimes:
result = convert_datetimes(result)
return result
|
[
"def",
"from_toml",
"(",
"value",
",",
"native_datetimes",
"=",
"True",
")",
":",
"if",
"not",
"toml",
":",
"raise",
"NotImplementedError",
"(",
"'No supported TOML library available'",
")",
"result",
"=",
"toml",
".",
"loads",
"(",
"value",
")",
"if",
"native_datetimes",
":",
"result",
"=",
"convert_datetimes",
"(",
"result",
")",
"return",
"result"
] |
Deserializes the given value from TOML.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
|
[
"Deserializes",
"the",
"given",
"value",
"from",
"TOML",
"."
] |
da779edd955ba1009d14fae4e5926e29ad112b9d
|
https://github.com/jayclassless/basicserial/blob/da779edd955ba1009d14fae4e5926e29ad112b9d/src/basicserial/__init__.py#L372-L393
|
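A minimal usage sketch for the `from_toml` entry above (illustrative only, not part of the dataset row; it assumes the `basicserial` package and one of its supported TOML backends are installed):

from basicserial import from_toml

doc = 'name = "example"\nwhen = "2019-01-01T00:00:00Z"'
parsed = from_toml(doc)                           # native_datetimes=True (default): date-like strings become datetime objects
raw = from_toml(doc, native_datetimes=False)      # date-like strings are left as plain strings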
240,401
|
MacHu-GWU/inspect_mate-project
|
dev/analysis.py
|
merge_true_table
|
def merge_true_table():
"""Merge all true table into single excel file.
"""
writer = pd.ExcelWriter("True Table.xlsx")
for p in Path(__file__).parent.select_by_ext(".csv"):
df = pd.read_csv(p.abspath, index_col=0)
df.to_excel(writer, p.fname, index=True)
writer.save()
|
python
|
def merge_true_table():
"""Merge all true table into single excel file.
"""
writer = pd.ExcelWriter("True Table.xlsx")
for p in Path(__file__).parent.select_by_ext(".csv"):
df = pd.read_csv(p.abspath, index_col=0)
df.to_excel(writer, p.fname, index=True)
writer.save()
|
[
"def",
"merge_true_table",
"(",
")",
":",
"writer",
"=",
"pd",
".",
"ExcelWriter",
"(",
"\"True Table.xlsx\"",
")",
"for",
"p",
"in",
"Path",
"(",
"__file__",
")",
".",
"parent",
".",
"select_by_ext",
"(",
"\".csv\"",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"p",
".",
"abspath",
",",
"index_col",
"=",
"0",
")",
"df",
".",
"to_excel",
"(",
"writer",
",",
"p",
".",
"fname",
",",
"index",
"=",
"True",
")",
"writer",
".",
"save",
"(",
")"
] |
Merge all true table into single excel file.
|
[
"Merge",
"all",
"true",
"table",
"into",
"single",
"excel",
"file",
"."
] |
a3d0980ee259daf578bb2226fea311af08ab435b
|
https://github.com/MacHu-GWU/inspect_mate-project/blob/a3d0980ee259daf578bb2226fea311af08ab435b/dev/analysis.py#L107-L114
|
240,402
|
kblin/aio-standalone
|
aiostandalone/signal.py
|
Signal.send
|
async def send(self, *args, **kwargs):
"""Send args and kwargs to all registered callbacks"""
for callback in self:
res = callback(*args, **kwargs)
if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
await res
|
python
|
async def send(self, *args, **kwargs):
"""Send args and kwargs to all registered callbacks"""
for callback in self:
res = callback(*args, **kwargs)
if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
await res
|
[
"async",
"def",
"send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"callback",
"in",
"self",
":",
"res",
"=",
"callback",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"asyncio",
".",
"iscoroutine",
"(",
"res",
")",
"or",
"isinstance",
"(",
"res",
",",
"asyncio",
".",
"Future",
")",
":",
"await",
"res"
] |
Send args and kwargs to all registered callbacks
|
[
"Send",
"args",
"and",
"kwargs",
"to",
"all",
"registered",
"callbacks"
] |
21f7212ee23e7c2dff679fbf3e9c8d9acf77b568
|
https://github.com/kblin/aio-standalone/blob/21f7212ee23e7c2dff679fbf3e9c8d9acf77b568/aiostandalone/signal.py#L20-L25
|
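An illustrative sketch of how `Signal.send` might be driven (not from the dataset; it assumes, as the loop `for callback in self` suggests, that `Signal` is a list-like container of callbacks, so it is re-declared here only to keep the snippet self-contained):

import asyncio

class Signal(list):
    async def send(self, *args, **kwargs):
        """Send args and kwargs to all registered callbacks"""
        for callback in self:
            res = callback(*args, **kwargs)
            if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
                await res

async def async_hook(app):
    print("async hook:", app)

def sync_hook(app):
    print("sync hook:", app)

async def main():
    on_startup = Signal([async_hook, sync_hook])
    await on_startup.send("my-app")   # awaits the coroutine result, calls the plain function directly

asyncio.run(main())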
240,403
|
inveniosoftware-contrib/record-recommender
|
record_recommender/app.py
|
setup_logging
|
def setup_logging(config=None):
"""Setup logging configuration."""
# TODO: integrate in general config file
print(__name__)
if config and config.get('logging'):
logging.config.dictConfig(config.get('logging'))
else:
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.DEBUG)
|
python
|
def setup_logging(config=None):
"""Setup logging configuration."""
# TODO: integrate in general config file
print(__name__)
if config and config.get('logging'):
logging.config.dictConfig(config.get('logging'))
else:
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.DEBUG)
|
[
"def",
"setup_logging",
"(",
"config",
"=",
"None",
")",
":",
"# TODO: integrate in general config file",
"print",
"(",
"__name__",
")",
"if",
"config",
"and",
"config",
".",
"get",
"(",
"'logging'",
")",
":",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"config",
".",
"get",
"(",
"'logging'",
")",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"'%(asctime)s %(levelname)s:%(message)s'",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")"
] |
Setup logging configuration.
|
[
"Setup",
"logging",
"configuration",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/app.py#L73-L81
|
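A hedged usage sketch for `setup_logging` (illustrative; assumes the function is imported from `record_recommender.app`, and that the nested dict follows the standard `logging.config.dictConfig` schema):

from record_recommender.app import setup_logging

# No config: falls back to logging.basicConfig at DEBUG level.
setup_logging()

# With a config that has a 'logging' key, that value is handed to logging.config.dictConfig.
config = {
    'logging': {
        'version': 1,
        'formatters': {'simple': {'format': '%(asctime)s %(levelname)s:%(message)s'}},
        'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter': 'simple'}},
        'root': {'level': 'INFO', 'handlers': ['console']},
    }
}
setup_logging(config)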
240,404
|
inveniosoftware-contrib/record-recommender
|
record_recommender/app.py
|
_create_all_recommendations
|
def _create_all_recommendations(cores, ip_views=False, config=None):
"""Calculate all recommendations in multiple processes."""
global _reco, _store
_reco = GraphRecommender(_store)
_reco.load_profile('Profiles')
if ip_views:
_reco.load_profile('Profiles_IP')
manager = Manager()
record_list = manager.list(_reco.all_records.keys())
# record_list = manager.list(list(_reco.all_records.keys())[:10])
num_records = len(_reco.all_records.keys())
logger.info("Recommendations to build: {}".format(num_records))
start = time.time()
reco_version = config.get('recommendation_version', 0)
done_records = num_records
if cores <= 1:
_create_recommendations(0, record_list, reco_version)
else:
try:
pool = Pool(cores)
multiple_results = [pool.apply_async(_create_recommendations,
(i, record_list, reco_version))
for i in range(cores)]
# Wait for all processes to exit
[res.get() for res in multiple_results]
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
# TODO: Update done_records.
pool.terminate()
pool.join()
duration = time.time() - start
logger.info("Time {} for {} recommendations".format(duration,
done_records))
|
python
|
def _create_all_recommendations(cores, ip_views=False, config=None):
"""Calculate all recommendations in multiple processes."""
global _reco, _store
_reco = GraphRecommender(_store)
_reco.load_profile('Profiles')
if ip_views:
_reco.load_profile('Profiles_IP')
manager = Manager()
record_list = manager.list(_reco.all_records.keys())
# record_list = manager.list(list(_reco.all_records.keys())[:10])
num_records = len(_reco.all_records.keys())
logger.info("Recommendations to build: {}".format(num_records))
start = time.time()
reco_version = config.get('recommendation_version', 0)
done_records = num_records
if cores <= 1:
_create_recommendations(0, record_list, reco_version)
else:
try:
pool = Pool(cores)
multiple_results = [pool.apply_async(_create_recommendations,
(i, record_list, reco_version))
for i in range(cores)]
# Wait for all processes to exit
[res.get() for res in multiple_results]
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
# TODO: Update done_records.
pool.terminate()
pool.join()
duration = time.time() - start
logger.info("Time {} for {} recommendations".format(duration,
done_records))
|
[
"def",
"_create_all_recommendations",
"(",
"cores",
",",
"ip_views",
"=",
"False",
",",
"config",
"=",
"None",
")",
":",
"global",
"_reco",
",",
"_store",
"_reco",
"=",
"GraphRecommender",
"(",
"_store",
")",
"_reco",
".",
"load_profile",
"(",
"'Profiles'",
")",
"if",
"ip_views",
":",
"_reco",
".",
"load_profile",
"(",
"'Profiles_IP'",
")",
"manager",
"=",
"Manager",
"(",
")",
"record_list",
"=",
"manager",
".",
"list",
"(",
"_reco",
".",
"all_records",
".",
"keys",
"(",
")",
")",
"# record_list = manager.list(list(_reco.all_records.keys())[:10])",
"num_records",
"=",
"len",
"(",
"_reco",
".",
"all_records",
".",
"keys",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Recommendations to build: {}\"",
".",
"format",
"(",
"num_records",
")",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"reco_version",
"=",
"config",
".",
"get",
"(",
"'recommendation_version'",
",",
"0",
")",
"done_records",
"=",
"num_records",
"if",
"cores",
"<=",
"1",
":",
"_create_recommendations",
"(",
"0",
",",
"record_list",
",",
"reco_version",
")",
"else",
":",
"try",
":",
"pool",
"=",
"Pool",
"(",
"cores",
")",
"multiple_results",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"_create_recommendations",
",",
"(",
"i",
",",
"record_list",
",",
"reco_version",
")",
")",
"for",
"i",
"in",
"range",
"(",
"cores",
")",
"]",
"# Wait for all processes to exit",
"[",
"res",
".",
"get",
"(",
")",
"for",
"res",
"in",
"multiple_results",
"]",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"Caught KeyboardInterrupt, terminating workers\"",
")",
"# TODO: Update done_records.",
"pool",
".",
"terminate",
"(",
")",
"pool",
".",
"join",
"(",
")",
"duration",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"logger",
".",
"info",
"(",
"\"Time {} for {} recommendations\"",
".",
"format",
"(",
"duration",
",",
"done_records",
")",
")"
] |
Calculate all recommendations in multiple processes.
|
[
"Calculate",
"all",
"recommendations",
"in",
"multiple",
"processes",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/app.py#L109-L145
|
240,405
|
inveniosoftware-contrib/record-recommender
|
record_recommender/app.py
|
RecordRecommender.fetch_weeks
|
def fetch_weeks(self, weeks, overwrite=False):
"""Fetch and cache the requested weeks."""
esf = ElasticsearchFetcher(self.store, self.config)
for year, week in weeks:
print("Fetch {}-{}".format(year, week))
esf.fetch(year, week, overwrite)
|
python
|
def fetch_weeks(self, weeks, overwrite=False):
"""Fetch and cache the requested weeks."""
esf = ElasticsearchFetcher(self.store, self.config)
for year, week in weeks:
print("Fetch {}-{}".format(year, week))
esf.fetch(year, week, overwrite)
|
[
"def",
"fetch_weeks",
"(",
"self",
",",
"weeks",
",",
"overwrite",
"=",
"False",
")",
":",
"esf",
"=",
"ElasticsearchFetcher",
"(",
"self",
".",
"store",
",",
"self",
".",
"config",
")",
"for",
"year",
",",
"week",
"in",
"weeks",
":",
"print",
"(",
"\"Fetch {}-{}\"",
".",
"format",
"(",
"year",
",",
"week",
")",
")",
"esf",
".",
"fetch",
"(",
"year",
",",
"week",
",",
"overwrite",
")"
] |
Fetch and cache the requested weeks.
|
[
"Fetch",
"and",
"cache",
"the",
"requested",
"weeks",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/app.py#L95-L100
|
240,406
|
inveniosoftware-contrib/record-recommender
|
record_recommender/app.py
|
RecordRecommender.create_all_recommendations
|
def create_all_recommendations(self, cores, ip_views=False):
"""Calculate the recommendations for all records."""
global _store
_store = self.store
_create_all_recommendations(cores, ip_views, self.config)
|
python
|
def create_all_recommendations(self, cores, ip_views=False):
"""Calculate the recommendations for all records."""
global _store
_store = self.store
_create_all_recommendations(cores, ip_views, self.config)
|
[
"def",
"create_all_recommendations",
"(",
"self",
",",
"cores",
",",
"ip_views",
"=",
"False",
")",
":",
"global",
"_store",
"_store",
"=",
"self",
".",
"store",
"_create_all_recommendations",
"(",
"cores",
",",
"ip_views",
",",
"self",
".",
"config",
")"
] |
Calculate the recommendations for all records.
|
[
"Calculate",
"the",
"recommendations",
"for",
"all",
"records",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/app.py#L102-L106
|
240,407
|
wdbm/pyprel
|
pyprel/__init__.py
|
table_dataset_database_table
|
def table_dataset_database_table(
table = None,
include_attributes = None,
rows_limit = None,
print_progress = False,
):
"""
Create a pyprel table contents list from a database table of the module
dataset. Attributes to be included in the table can be specified; by
default, all attributes are included. A limit on the number of rows included
can be specified. Progress on building the table can be reported.
"""
if print_progress:
import shijian
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
number_of_rows = len(table)
if include_attributes:
columns = include_attributes
else:
columns = table.columns
table_contents = [columns]
for index_row, row in enumerate(table):
if rows_limit is not None:
if index_row >= rows_limit:
break
row_contents = []
for column in columns:
try:
string_representation = str(row[column])
except:
string_representation = str(row[column].encode("utf-8"))
row_contents.append(string_representation)
table_contents.append(row_contents)
if print_progress:
print(progress.add_datum(
fraction = float(index_row) / float(number_of_rows))
)
return table_contents
|
python
|
def table_dataset_database_table(
table = None,
include_attributes = None,
rows_limit = None,
print_progress = False,
):
"""
Create a pyprel table contents list from a database table of the module
dataset. Attributes to be included in the table can be specified; by
default, all attributes are included. A limit on the number of rows included
can be specified. Progress on building the table can be reported.
"""
if print_progress:
import shijian
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
number_of_rows = len(table)
if include_attributes:
columns = include_attributes
else:
columns = table.columns
table_contents = [columns]
for index_row, row in enumerate(table):
if rows_limit is not None:
if index_row >= rows_limit:
break
row_contents = []
for column in columns:
try:
string_representation = str(row[column])
except:
string_representation = str(row[column].encode("utf-8"))
row_contents.append(string_representation)
table_contents.append(row_contents)
if print_progress:
print(progress.add_datum(
fraction = float(index_row) / float(number_of_rows))
)
return table_contents
|
[
"def",
"table_dataset_database_table",
"(",
"table",
"=",
"None",
",",
"include_attributes",
"=",
"None",
",",
"rows_limit",
"=",
"None",
",",
"print_progress",
"=",
"False",
",",
")",
":",
"if",
"print_progress",
":",
"import",
"shijian",
"progress",
"=",
"shijian",
".",
"Progress",
"(",
")",
"progress",
".",
"engage_quick_calculation_mode",
"(",
")",
"number_of_rows",
"=",
"len",
"(",
"table",
")",
"if",
"include_attributes",
":",
"columns",
"=",
"include_attributes",
"else",
":",
"columns",
"=",
"table",
".",
"columns",
"table_contents",
"=",
"[",
"columns",
"]",
"for",
"index_row",
",",
"row",
"in",
"enumerate",
"(",
"table",
")",
":",
"if",
"rows_limit",
"is",
"not",
"None",
":",
"if",
"index_row",
">=",
"rows_limit",
":",
"break",
"row_contents",
"=",
"[",
"]",
"for",
"column",
"in",
"columns",
":",
"try",
":",
"string_representation",
"=",
"str",
"(",
"row",
"[",
"column",
"]",
")",
"except",
":",
"string_representation",
"=",
"str",
"(",
"row",
"[",
"column",
"]",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"row_contents",
".",
"append",
"(",
"string_representation",
")",
"table_contents",
".",
"append",
"(",
"row_contents",
")",
"if",
"print_progress",
":",
"print",
"(",
"progress",
".",
"add_datum",
"(",
"fraction",
"=",
"float",
"(",
"index_row",
")",
"/",
"float",
"(",
"number_of_rows",
")",
")",
")",
"return",
"table_contents"
] |
Create a pyprel table contents list from a database table of the module
dataset. Attributes to be included in the table can be specified; by
default, all attributes are included. A limit on the number of rows included
can be specified. Progress on building the table can be reported.
|
[
"Create",
"a",
"pyprel",
"table",
"contents",
"list",
"from",
"a",
"database",
"table",
"of",
"the",
"module",
"dataset",
".",
"Attributes",
"to",
"be",
"included",
"in",
"the",
"table",
"can",
"be",
"specified",
";",
"by",
"default",
"all",
"attributes",
"are",
"included",
".",
"A",
"limit",
"on",
"the",
"number",
"of",
"rows",
"included",
"can",
"be",
"specified",
".",
"Progress",
"on",
"building",
"the",
"table",
"can",
"be",
"reported",
"."
] |
c1253ea3f8c60a2f5493a0d5a61ca3c84df7c21d
|
https://github.com/wdbm/pyprel/blob/c1253ea3f8c60a2f5493a0d5a61ca3c84df7c21d/pyprel/__init__.py#L275-L317
|
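A possible usage sketch (illustrative; the docstring says the table comes from the `dataset` module, so this assumes the `dataset` package and a hypothetical `measurements` table in a local SQLite database):

import dataset
import pyprel

db = dataset.connect('sqlite:///example.db')
contents = pyprel.table_dataset_database_table(
    table=db['measurements'],   # a dataset table object; iterating it yields rows
    rows_limit=100,             # include at most 100 rows
    print_progress=False,
)
# contents is a list of lists: the column names followed by one list per row.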
240,408
|
noobermin/lspreader
|
lspreader/nearest.py
|
simple_nearest_indices
|
def simple_nearest_indices(xs,res):
'''
Simple nearest interpolator that interpolates based on
the minima and maxima of points based on the passed
resolution in res.
Parameters:
-----------
xs -- A collection of `ndim` arrays of points.
res -- List of resolutions.
'''
maxs = [max(a) for a in xs]
mins = [min(a) for a in xs]
XS = [np.linspace(mn, mx, r) for mn,mx,r in zip(mins,maxs,res)];
XS = tuple(np.meshgrid(*XS,indexing='ij'));
if type(xs) != tuple:
xs = tuple(xs);
return nearest_indices(xs,XS);
|
python
|
def simple_nearest_indices(xs,res):
'''
Simple nearest interpolator that interpolates based on
the minima and maxima of points based on the passed
resolution in res.
Parameters:
-----------
xs -- A collection of `ndim` arrays of points.
res -- List of resolutions.
'''
maxs = [max(a) for a in xs]
mins = [min(a) for a in xs]
XS = [np.linspace(mn, mx, r) for mn,mx,r in zip(mins,maxs,res)];
XS = tuple(np.meshgrid(*XS,indexing='ij'));
if type(xs) != tuple:
xs = tuple(xs);
return nearest_indices(xs,XS);
|
[
"def",
"simple_nearest_indices",
"(",
"xs",
",",
"res",
")",
":",
"maxs",
"=",
"[",
"max",
"(",
"a",
")",
"for",
"a",
"in",
"xs",
"]",
"mins",
"=",
"[",
"min",
"(",
"a",
")",
"for",
"a",
"in",
"xs",
"]",
"XS",
"=",
"[",
"np",
".",
"linspace",
"(",
"mn",
",",
"mx",
",",
"r",
")",
"for",
"mn",
",",
"mx",
",",
"r",
"in",
"zip",
"(",
"mins",
",",
"maxs",
",",
"res",
")",
"]",
"XS",
"=",
"tuple",
"(",
"np",
".",
"meshgrid",
"(",
"*",
"XS",
",",
"indexing",
"=",
"'ij'",
")",
")",
"if",
"type",
"(",
"xs",
")",
"!=",
"tuple",
":",
"xs",
"=",
"tuple",
"(",
"xs",
")",
"return",
"nearest_indices",
"(",
"xs",
",",
"XS",
")"
] |
Simple nearest interpolator that interpolates based on
the minima and maxima of points based on the passed
resolution in res.
Parameters:
-----------
xs -- A collection of `ndim` arrays of points.
res -- List of resolutions.
|
[
"Simple",
"nearest",
"interpolator",
"that",
"interpolates",
"based",
"on",
"the",
"minima",
"and",
"maxima",
"of",
"points",
"based",
"on",
"the",
"passed",
"resolution",
"in",
"res",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/nearest.py#L66-L84
|
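An illustrative call sketch (not from the dataset; assumes NumPy and the module's `nearest_indices` helper, which this wrapper delegates to):

import numpy as np
from lspreader.nearest import simple_nearest_indices

# Scattered 2D sample coordinates and a requested 100 x 200 regular grid.
x = np.random.uniform(0.0, 1.0, 5000)
y = np.random.uniform(-1.0, 2.0, 5000)
indices = simple_nearest_indices((x, y), (100, 200))
# `indices` can then be used to look up nearest sample points on the regular grid;
# the exact semantics are defined by nearest_indices in the same module.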
240,409
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/manager.py
|
ProjectVariablesManager.clear
|
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
|
python
|
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
|
[
"def",
"clear",
"(",
"self",
")",
":",
"for",
"variable",
"in",
"self",
".",
"_project",
".",
"variables",
".",
"list",
"(",
"all",
"=",
"True",
")",
":",
"variable",
".",
"delete",
"(",
")"
] |
Clears all of the build variables.
|
[
"Clears",
"all",
"of",
"the",
"build",
"variables",
"."
] |
ed1afe50bc41fa20ffb29cacba5ff6dbc2446808
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/manager.py#L51-L56
|
240,410
|
Metatab/tableintuit
|
tableintuit/types.py
|
Column._resolved_type
|
def _resolved_type(self):
"""Return the type for the columns, and a flag to indicate that the
column has codes."""
import datetime
self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None
for test, testf in tests + [(None, None)]}
# If it is more than 5% str, it's a str
try:
if self.type_ratios.get(text_type,0) + self.type_ratios.get(binary_type,0) > .05:
if self.type_counts[text_type] > 0:
return text_type, False
elif self.type_counts[binary_type] > 0:
return binary_type, False
except TypeError as e:
# This is probably the result of the type being unknown
pass
if self.type_counts[datetime.datetime] > 0:
num_type = datetime.datetime
elif self.type_counts[datetime.date] > 0:
num_type = datetime.date
elif self.type_counts[datetime.time] > 0:
num_type = datetime.time
elif self.type_counts[float] > 0:
num_type = float
elif self.type_counts[int] > 0:
num_type = int
elif self.type_counts[text_type] > 0:
num_type = text_type
elif self.type_counts[binary_type] > 0:
num_type = binary_type
else:
num_type = unknown
if self.type_counts[binary_type] > 0 and num_type != binary_type:
has_codes = True
else:
has_codes = False
return num_type, has_codes
|
python
|
def _resolved_type(self):
"""Return the type for the columns, and a flag to indicate that the
column has codes."""
import datetime
self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None
for test, testf in tests + [(None, None)]}
# If it is more than 5% str, it's a str
try:
if self.type_ratios.get(text_type,0) + self.type_ratios.get(binary_type,0) > .05:
if self.type_counts[text_type] > 0:
return text_type, False
elif self.type_counts[binary_type] > 0:
return binary_type, False
except TypeError as e:
# This is probably the result of the type being unknown
pass
if self.type_counts[datetime.datetime] > 0:
num_type = datetime.datetime
elif self.type_counts[datetime.date] > 0:
num_type = datetime.date
elif self.type_counts[datetime.time] > 0:
num_type = datetime.time
elif self.type_counts[float] > 0:
num_type = float
elif self.type_counts[int] > 0:
num_type = int
elif self.type_counts[text_type] > 0:
num_type = text_type
elif self.type_counts[binary_type] > 0:
num_type = binary_type
else:
num_type = unknown
if self.type_counts[binary_type] > 0 and num_type != binary_type:
has_codes = True
else:
has_codes = False
return num_type, has_codes
|
[
"def",
"_resolved_type",
"(",
"self",
")",
":",
"import",
"datetime",
"self",
".",
"type_ratios",
"=",
"{",
"test",
":",
"(",
"float",
"(",
"self",
".",
"type_counts",
"[",
"test",
"]",
")",
"/",
"float",
"(",
"self",
".",
"count",
")",
")",
"if",
"self",
".",
"count",
"else",
"None",
"for",
"test",
",",
"testf",
"in",
"tests",
"+",
"[",
"(",
"None",
",",
"None",
")",
"]",
"}",
"# If it is more than 5% str, it's a str",
"try",
":",
"if",
"self",
".",
"type_ratios",
".",
"get",
"(",
"text_type",
",",
"0",
")",
"+",
"self",
".",
"type_ratios",
".",
"get",
"(",
"binary_type",
",",
"0",
")",
">",
".05",
":",
"if",
"self",
".",
"type_counts",
"[",
"text_type",
"]",
">",
"0",
":",
"return",
"text_type",
",",
"False",
"elif",
"self",
".",
"type_counts",
"[",
"binary_type",
"]",
">",
"0",
":",
"return",
"binary_type",
",",
"False",
"except",
"TypeError",
"as",
"e",
":",
"# This is probably the result of the type being unknown",
"pass",
"if",
"self",
".",
"type_counts",
"[",
"datetime",
".",
"datetime",
"]",
">",
"0",
":",
"num_type",
"=",
"datetime",
".",
"datetime",
"elif",
"self",
".",
"type_counts",
"[",
"datetime",
".",
"date",
"]",
">",
"0",
":",
"num_type",
"=",
"datetime",
".",
"date",
"elif",
"self",
".",
"type_counts",
"[",
"datetime",
".",
"time",
"]",
">",
"0",
":",
"num_type",
"=",
"datetime",
".",
"time",
"elif",
"self",
".",
"type_counts",
"[",
"float",
"]",
">",
"0",
":",
"num_type",
"=",
"float",
"elif",
"self",
".",
"type_counts",
"[",
"int",
"]",
">",
"0",
":",
"num_type",
"=",
"int",
"elif",
"self",
".",
"type_counts",
"[",
"text_type",
"]",
">",
"0",
":",
"num_type",
"=",
"text_type",
"elif",
"self",
".",
"type_counts",
"[",
"binary_type",
"]",
">",
"0",
":",
"num_type",
"=",
"binary_type",
"else",
":",
"num_type",
"=",
"unknown",
"if",
"self",
".",
"type_counts",
"[",
"binary_type",
"]",
">",
"0",
"and",
"num_type",
"!=",
"binary_type",
":",
"has_codes",
"=",
"True",
"else",
":",
"has_codes",
"=",
"False",
"return",
"num_type",
",",
"has_codes"
] |
Return the type for the columns, and a flag to indicate that the
column has codes.
|
[
"Return",
"the",
"type",
"for",
"the",
"columns",
"and",
"a",
"flag",
"to",
"indicate",
"that",
"the",
"column",
"has",
"codes",
"."
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/types.py#L242-L293
|
240,411
|
Metatab/tableintuit
|
tableintuit/types.py
|
TypeIntuiter.promote_type
|
def promote_type(orig_type, new_type):
"""Given a table with an original type, decide whether a new determination of a new applicable type
should overide the existing one"""
if not new_type:
return orig_type
if not orig_type:
return new_type
try:
orig_type = orig_type.__name__
except AttributeError:
pass
try:
new_type = new_type.__name__
except AttributeError:
pass
type_precidence = ['unknown', 'int', 'float', 'date', 'time', 'datetime', 'str', 'bytes', 'unicode']
# TODO This will fail for dates and times.
if type_precidence.index(new_type) > type_precidence.index(orig_type):
return new_type
else:
return orig_type
|
python
|
def promote_type(orig_type, new_type):
"""Given a table with an original type, decide whether a new determination of a new applicable type
should overide the existing one"""
if not new_type:
return orig_type
if not orig_type:
return new_type
try:
orig_type = orig_type.__name__
except AttributeError:
pass
try:
new_type = new_type.__name__
except AttributeError:
pass
type_precidence = ['unknown', 'int', 'float', 'date', 'time', 'datetime', 'str', 'bytes', 'unicode']
# TODO This will fail for dates and times.
if type_precidence.index(new_type) > type_precidence.index(orig_type):
return new_type
else:
return orig_type
|
[
"def",
"promote_type",
"(",
"orig_type",
",",
"new_type",
")",
":",
"if",
"not",
"new_type",
":",
"return",
"orig_type",
"if",
"not",
"orig_type",
":",
"return",
"new_type",
"try",
":",
"orig_type",
"=",
"orig_type",
".",
"__name__",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"new_type",
"=",
"new_type",
".",
"__name__",
"except",
"AttributeError",
":",
"pass",
"type_precidence",
"=",
"[",
"'unknown'",
",",
"'int'",
",",
"'float'",
",",
"'date'",
",",
"'time'",
",",
"'datetime'",
",",
"'str'",
",",
"'bytes'",
",",
"'unicode'",
"]",
"# TODO This will fail for dates and times.",
"if",
"type_precidence",
".",
"index",
"(",
"new_type",
")",
">",
"type_precidence",
".",
"index",
"(",
"orig_type",
")",
":",
"return",
"new_type",
"else",
":",
"return",
"orig_type"
] |
Given a table with an original type, decide whether a new determination of a new applicable type
should overide the existing one
|
[
"Given",
"a",
"table",
"with",
"an",
"original",
"type",
"decide",
"whether",
"a",
"new",
"determination",
"of",
"a",
"new",
"applicable",
"type",
"should",
"overide",
"the",
"existing",
"one"
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/types.py#L423-L450
|
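A small worked sketch of the precedence logic (illustrative; `promote_type` is listed above as `TypeIntuiter.promote_type` and takes no `self`, so it is assumed to be callable as a static helper):

from tableintuit.types import TypeIntuiter

TypeIntuiter.promote_type(int, float)        # -> 'float'   (float outranks int)
TypeIntuiter.promote_type('float', 'int')    # -> 'float'   (a lower-ranked new type is ignored)
TypeIntuiter.promote_type(None, 'str')       # -> 'str'     (no original type yet)
TypeIntuiter.promote_type('datetime', None)  # -> 'datetime' (nothing new to promote to)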
240,412
|
openp2pdesign/makerlabs
|
makerlabs/repaircafe_org.py
|
data_from_repaircafe_org
|
def data_from_repaircafe_org():
"""Gets data from repaircafe_org."""
# Use Chrome as a browser
browser = webdriver.Chrome()
# Use PhantomJS as a browser
# browser = webdriver.PhantomJS('phantomjs')
browser.get("https://repaircafe.org/en/?s=Contact+the+local+organisers")
browser.maximize_window()
# Iterate over results (the #viewmore_link button)
viewmore_button = True
while viewmore_button:
try:
viewmore = browser.find_element_by_id("viewmore_link")
# Scroll to the link in order to make it visible
browser.execute_script("arguments[0].scrollIntoView();", viewmore)
# Keep searching
viewmore.click()
except:
# If there's an error, we have reached the end of the search
viewmore_button = False
# Give a bit of time for loading the search results
sleep(2)
# Load the source code
page_source = BeautifulSoup(browser.page_source, "lxml")
# Close the browser
browser.quit()
# Parse the source code in order to find all the links under H4s
data = []
for h4 in page_source.find_all("h4"):
for a in h4.find_all('a', href=True):
data.append({"name": a.contents[0], "url": a['href']})
return data
|
python
|
def data_from_repaircafe_org():
"""Gets data from repaircafe_org."""
# Use Chrome as a browser
browser = webdriver.Chrome()
# Use PhantomJS as a browser
# browser = webdriver.PhantomJS('phantomjs')
browser.get("https://repaircafe.org/en/?s=Contact+the+local+organisers")
browser.maximize_window()
# Iterate over results (the #viewmore_link button)
viewmore_button = True
while viewmore_button:
try:
viewmore = browser.find_element_by_id("viewmore_link")
# Scroll to the link in order to make it visible
browser.execute_script("arguments[0].scrollIntoView();", viewmore)
# Keep searching
viewmore.click()
except:
# If there's an error, we have reached the end of the search
viewmore_button = False
# Give a bit of time for loading the search results
sleep(2)
# Load the source code
page_source = BeautifulSoup(browser.page_source, "lxml")
# Close the browser
browser.quit()
# Parse the source code in order to find all the links under H4s
data = []
for h4 in page_source.find_all("h4"):
for a in h4.find_all('a', href=True):
data.append({"name": a.contents[0], "url": a['href']})
return data
|
[
"def",
"data_from_repaircafe_org",
"(",
")",
":",
"# Use Chrome as a browser",
"browser",
"=",
"webdriver",
".",
"Chrome",
"(",
")",
"# Use PhantomJS as a browser",
"# browser = webdriver.PhantomJS('phantomjs')",
"browser",
".",
"get",
"(",
"\"https://repaircafe.org/en/?s=Contact+the+local+organisers\"",
")",
"browser",
".",
"maximize_window",
"(",
")",
"# Iterate over results (the #viewmore_link button)",
"viewmore_button",
"=",
"True",
"while",
"viewmore_button",
":",
"try",
":",
"viewmore",
"=",
"browser",
".",
"find_element_by_id",
"(",
"\"viewmore_link\"",
")",
"# Scroll to the link in order to make it visible",
"browser",
".",
"execute_script",
"(",
"\"arguments[0].scrollIntoView();\"",
",",
"viewmore",
")",
"# Keep searching",
"viewmore",
".",
"click",
"(",
")",
"except",
":",
"# If there's an error, we have reached the end of the search",
"viewmore_button",
"=",
"False",
"# Give a bit of time for loading the search results",
"sleep",
"(",
"2",
")",
"# Load the source code",
"page_source",
"=",
"BeautifulSoup",
"(",
"browser",
".",
"page_source",
",",
"\"lxml\"",
")",
"# Close the browser",
"browser",
".",
"quit",
"(",
")",
"# Parse the source code in order to find all the links under H4s",
"data",
"=",
"[",
"]",
"for",
"h4",
"in",
"page_source",
".",
"find_all",
"(",
"\"h4\"",
")",
":",
"for",
"a",
"in",
"h4",
".",
"find_all",
"(",
"'a'",
",",
"href",
"=",
"True",
")",
":",
"data",
".",
"append",
"(",
"{",
"\"name\"",
":",
"a",
".",
"contents",
"[",
"0",
"]",
",",
"\"url\"",
":",
"a",
"[",
"'href'",
"]",
"}",
")",
"return",
"data"
] |
Gets data from repaircafe_org.
|
[
"Gets",
"data",
"from",
"repaircafe_org",
"."
] |
b5838440174f10d370abb671358db9a99d7739fd
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/repaircafe_org.py#L44-L79
|
240,413
|
openp2pdesign/makerlabs
|
makerlabs/repaircafe_org.py
|
get_labs
|
def get_labs(format):
"""Gets Repair Cafe data from repairecafe.org."""
data = data_from_repaircafe_org()
repaircafes = {}
# Load all the Repair Cafes
for i in data:
# Create a lab
current_lab = RepairCafe()
# Add existing data from first scraping
current_lab.name = i["name"]
slug = i["url"].replace("https://repaircafe.org/locations/", "")
if slug.endswith("/"):
slug.replace("/", "")
current_lab.slug = slug
current_lab.url = i["url"]
# Scrape for more data
page_request = requests.get(i["url"])
if page_request.status_code == 200:
page_source = BeautifulSoup(page_request.text, "lxml")
else:
output = "There was an error while accessing data on repaircafe.org."
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
column = page_source.find_all("div", class_="sc_column_item_2")
for j in column:
for p in j.find_all('p'):
for a in p.find_all('a', href=True):
if "facebook" in a['href']:
current_lab.links["facebook"] = a['href']
elif "twitter" in a['href']:
current_lab.links["twitter"] = a['href']
else:
current_lab.links[a['href']] = a['href']
# Find address
column = page_source.find_all("div", class_="sc_column_item_1")
for x in column:
if x.string:
print x.string.strip()
exit()
# current_lab.address_1 = i["address_1"]
# current_lab.address_2 = i["address_2"]
# current_lab.address_notes = i["address_notes"]
# current_lab.blurb = i["blurb"]
# current_lab.city = i["city"]
# current_lab.country_code = i["country_code"]
# current_lab.county = i["county"]
# current_lab.description = i["description"]
# current_lab.email = i["email"]
# current_lab.id = i["id"]
# current_lab.phone = i["phone"]
# current_lab.postal_code = i["postal_code"]
#
#
# current_lab.continent = country_alpha2_to_continent_code(i[
# "country_code"].upper())
# current_country = pycountry.countries.get(
# alpha_2=i["country_code"].upper())
# current_lab.country_code = current_country.alpha_3
# current_lab.country = current_country.name
# if i["longitude"] is None or i["latitude"] is None:
# # Be nice with the geocoder API limit
# errorsb += 1
# # sleep(10)
# # location = geolocator.geocode(
# # {"city": i["city"],
# # "country": i["country_code"].upper()},
# # addressdetails=True,
# # language="en")
# # if location is not None:
# # current_lab.latitude = location.latitude
# # current_lab.longitude = location.longitude
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# else:
# # Be nice with the geocoder API limit
# sleep(10)
# errorsa += 1
# # location = geolocator.reverse((i["latitude"], i["longitude"]))
# # if location is not None:
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# Add the lab to the list
repaircafes[slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in repaircafes:
single = repaircafes[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = repaircafes
# Default: return an oject
else:
output = repaircafes
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
python
|
def get_labs(format):
"""Gets Repair Cafe data from repairecafe.org."""
data = data_from_repaircafe_org()
repaircafes = {}
# Load all the Repair Cafes
for i in data:
# Create a lab
current_lab = RepairCafe()
# Add existing data from first scraping
current_lab.name = i["name"]
slug = i["url"].replace("https://repaircafe.org/locations/", "")
if slug.endswith("/"):
slug.replace("/", "")
current_lab.slug = slug
current_lab.url = i["url"]
# Scrape for more data
page_request = requests.get(i["url"])
if page_request.status_code == 200:
page_source = BeautifulSoup(page_request.text, "lxml")
else:
output = "There was an error while accessing data on repaircafe.org."
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
column = page_source.find_all("div", class_="sc_column_item_2")
for j in column:
for p in j.find_all('p'):
for a in p.find_all('a', href=True):
if "facebook" in a['href']:
current_lab.links["facebook"] = a['href']
elif "twitter" in a['href']:
current_lab.links["twitter"] = a['href']
else:
current_lab.links[a['href']] = a['href']
# Find address
column = page_source.find_all("div", class_="sc_column_item_1")
for x in column:
if x.string:
print x.string.strip()
exit()
# current_lab.address_1 = i["address_1"]
# current_lab.address_2 = i["address_2"]
# current_lab.address_notes = i["address_notes"]
# current_lab.blurb = i["blurb"]
# current_lab.city = i["city"]
# current_lab.country_code = i["country_code"]
# current_lab.county = i["county"]
# current_lab.description = i["description"]
# current_lab.email = i["email"]
# current_lab.id = i["id"]
# current_lab.phone = i["phone"]
# current_lab.postal_code = i["postal_code"]
#
#
# current_lab.continent = country_alpha2_to_continent_code(i[
# "country_code"].upper())
# current_country = pycountry.countries.get(
# alpha_2=i["country_code"].upper())
# current_lab.country_code = current_country.alpha_3
# current_lab.country = current_country.name
# if i["longitude"] is None or i["latitude"] is None:
# # Be nice with the geocoder API limit
# errorsb += 1
# # sleep(10)
# # location = geolocator.geocode(
# # {"city": i["city"],
# # "country": i["country_code"].upper()},
# # addressdetails=True,
# # language="en")
# # if location is not None:
# # current_lab.latitude = location.latitude
# # current_lab.longitude = location.longitude
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# else:
# # Be nice with the geocoder API limit
# sleep(10)
# errorsa += 1
# # location = geolocator.reverse((i["latitude"], i["longitude"]))
# # if location is not None:
# # if "county" in location.raw["address"]:
# # current_lab.county = location.raw["address"][
# # "county"].encode('utf-8')
# # if "state" in location.raw["address"]:
# # current_lab.state = location.raw["address"][
# # "state"].encode('utf-8')
# Add the lab to the list
repaircafes[slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in repaircafes:
single = repaircafes[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in repaircafes:
output[j] = repaircafes[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = repaircafes
# Default: return an oject
else:
output = repaircafes
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
[
"def",
"get_labs",
"(",
"format",
")",
":",
"data",
"=",
"data_from_repaircafe_org",
"(",
")",
"repaircafes",
"=",
"{",
"}",
"# Load all the Repair Cafes",
"for",
"i",
"in",
"data",
":",
"# Create a lab",
"current_lab",
"=",
"RepairCafe",
"(",
")",
"# Add existing data from first scraping",
"current_lab",
".",
"name",
"=",
"i",
"[",
"\"name\"",
"]",
"slug",
"=",
"i",
"[",
"\"url\"",
"]",
".",
"replace",
"(",
"\"https://repaircafe.org/locations/\"",
",",
"\"\"",
")",
"if",
"slug",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"slug",
".",
"replace",
"(",
"\"/\"",
",",
"\"\"",
")",
"current_lab",
".",
"slug",
"=",
"slug",
"current_lab",
".",
"url",
"=",
"i",
"[",
"\"url\"",
"]",
"# Scrape for more data",
"page_request",
"=",
"requests",
".",
"get",
"(",
"i",
"[",
"\"url\"",
"]",
")",
"if",
"page_request",
".",
"status_code",
"==",
"200",
":",
"page_source",
"=",
"BeautifulSoup",
"(",
"page_request",
".",
"text",
",",
"\"lxml\"",
")",
"else",
":",
"output",
"=",
"\"There was an error while accessing data on repaircafe.org.\"",
"# Find Facebook and Twitter links, add also the other ones",
"current_lab",
".",
"links",
"=",
"{",
"\"facebook\"",
":",
"\"\"",
",",
"\"twitter\"",
":",
"\"\"",
"}",
"column",
"=",
"page_source",
".",
"find_all",
"(",
"\"div\"",
",",
"class_",
"=",
"\"sc_column_item_2\"",
")",
"for",
"j",
"in",
"column",
":",
"for",
"p",
"in",
"j",
".",
"find_all",
"(",
"'p'",
")",
":",
"for",
"a",
"in",
"p",
".",
"find_all",
"(",
"'a'",
",",
"href",
"=",
"True",
")",
":",
"if",
"\"facebook\"",
"in",
"a",
"[",
"'href'",
"]",
":",
"current_lab",
".",
"links",
"[",
"\"facebook\"",
"]",
"=",
"a",
"[",
"'href'",
"]",
"elif",
"\"twitter\"",
"in",
"a",
"[",
"'href'",
"]",
":",
"current_lab",
".",
"links",
"[",
"\"twitter\"",
"]",
"=",
"a",
"[",
"'href'",
"]",
"else",
":",
"current_lab",
".",
"links",
"[",
"a",
"[",
"'href'",
"]",
"]",
"=",
"a",
"[",
"'href'",
"]",
"# Find address",
"column",
"=",
"page_source",
".",
"find_all",
"(",
"\"div\"",
",",
"class_",
"=",
"\"sc_column_item_1\"",
")",
"for",
"x",
"in",
"column",
":",
"if",
"x",
".",
"string",
":",
"print",
"x",
".",
"string",
".",
"strip",
"(",
")",
"exit",
"(",
")",
"# current_lab.address_1 = i[\"address_1\"]",
"# current_lab.address_2 = i[\"address_2\"]",
"# current_lab.address_notes = i[\"address_notes\"]",
"# current_lab.blurb = i[\"blurb\"]",
"# current_lab.city = i[\"city\"]",
"# current_lab.country_code = i[\"country_code\"]",
"# current_lab.county = i[\"county\"]",
"# current_lab.description = i[\"description\"]",
"# current_lab.email = i[\"email\"]",
"# current_lab.id = i[\"id\"]",
"# current_lab.phone = i[\"phone\"]",
"# current_lab.postal_code = i[\"postal_code\"]",
"#",
"#",
"# current_lab.continent = country_alpha2_to_continent_code(i[",
"# \"country_code\"].upper())",
"# current_country = pycountry.countries.get(",
"# alpha_2=i[\"country_code\"].upper())",
"# current_lab.country_code = current_country.alpha_3",
"# current_lab.country = current_country.name",
"# if i[\"longitude\"] is None or i[\"latitude\"] is None:",
"# # Be nice with the geocoder API limit",
"# errorsb += 1",
"# # sleep(10)",
"# # location = geolocator.geocode(",
"# # {\"city\": i[\"city\"],",
"# # \"country\": i[\"country_code\"].upper()},",
"# # addressdetails=True,",
"# # language=\"en\")",
"# # if location is not None:",
"# # current_lab.latitude = location.latitude",
"# # current_lab.longitude = location.longitude",
"# # if \"county\" in location.raw[\"address\"]:",
"# # current_lab.county = location.raw[\"address\"][",
"# # \"county\"].encode('utf-8')",
"# # if \"state\" in location.raw[\"address\"]:",
"# # current_lab.state = location.raw[\"address\"][",
"# # \"state\"].encode('utf-8')",
"# else:",
"# # Be nice with the geocoder API limit",
"# sleep(10)",
"# errorsa += 1",
"# # location = geolocator.reverse((i[\"latitude\"], i[\"longitude\"]))",
"# # if location is not None:",
"# # if \"county\" in location.raw[\"address\"]:",
"# # current_lab.county = location.raw[\"address\"][",
"# # \"county\"].encode('utf-8')",
"# # if \"state\" in location.raw[\"address\"]:",
"# # current_lab.state = location.raw[\"address\"][",
"# # \"state\"].encode('utf-8')",
"# Add the lab to the list",
"repaircafes",
"[",
"slug",
"]",
"=",
"current_lab",
"# Return a dictiornary / json",
"if",
"format",
".",
"lower",
"(",
")",
"==",
"\"dict\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"json\"",
":",
"output",
"=",
"{",
"}",
"for",
"j",
"in",
"repaircafes",
":",
"output",
"[",
"j",
"]",
"=",
"repaircafes",
"[",
"j",
"]",
".",
"__dict__",
"# Return a geojson",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"geojson\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"geo\"",
":",
"labs_list",
"=",
"[",
"]",
"for",
"l",
"in",
"repaircafes",
":",
"single",
"=",
"repaircafes",
"[",
"l",
"]",
".",
"__dict__",
"single_lab",
"=",
"Feature",
"(",
"type",
"=",
"\"Feature\"",
",",
"geometry",
"=",
"Point",
"(",
"(",
"single",
"[",
"\"latitude\"",
"]",
",",
"single",
"[",
"\"longitude\"",
"]",
")",
")",
",",
"properties",
"=",
"single",
")",
"labs_list",
".",
"append",
"(",
"single_lab",
")",
"output",
"=",
"dumps",
"(",
"FeatureCollection",
"(",
"labs_list",
")",
")",
"# Return a Pandas DataFrame",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"pandas\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"dataframe\"",
":",
"output",
"=",
"{",
"}",
"for",
"j",
"in",
"repaircafes",
":",
"output",
"[",
"j",
"]",
"=",
"repaircafes",
"[",
"j",
"]",
".",
"__dict__",
"# Transform the dict into a Pandas DataFrame",
"output",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"output",
")",
"output",
"=",
"output",
".",
"transpose",
"(",
")",
"# Return an object",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"object\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"obj\"",
":",
"output",
"=",
"repaircafes",
"# Default: return an oject",
"else",
":",
"output",
"=",
"repaircafes",
"# Return a proper json",
"if",
"format",
".",
"lower",
"(",
")",
"==",
"\"json\"",
":",
"output",
"=",
"json",
".",
"dumps",
"(",
"output",
")",
"return",
"output"
] |
Gets Repair Cafe data from repairecafe.org.
|
[
"Gets",
"Repair",
"Cafe",
"data",
"from",
"repairecafe",
".",
"org",
"."
] |
b5838440174f10d370abb671358db9a99d7739fd
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/repaircafe_org.py#L82-L214
|
240,414
|
aquatix/python-utilkit
|
utilkit/fileutil.py
|
archive_if_exists
|
def archive_if_exists(filename):
"""
Move `filename` out of the way, archiving it by appending the current datetime
Can be a file or a directory
"""
if os.path.exists(filename):
current_time = datetime.datetime.now()
dt_format = '%Y-%m-%dT%H:%M:%S%z'
timestamp = current_time.strftime(dt_format)
dst = filename + '_' + timestamp
shutil.move(filename, dst)
|
python
|
def archive_if_exists(filename):
"""
Move `filename` out of the way, archiving it by appending the current datetime
Can be a file or a directory
"""
if os.path.exists(filename):
current_time = datetime.datetime.now()
dt_format = '%Y-%m-%dT%H:%M:%S%z'
timestamp = current_time.strftime(dt_format)
dst = filename + '_' + timestamp
shutil.move(filename, dst)
|
[
"def",
"archive_if_exists",
"(",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"current_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"dt_format",
"=",
"'%Y-%m-%dT%H:%M:%S%z'",
"timestamp",
"=",
"current_time",
".",
"strftime",
"(",
"dt_format",
")",
"dst",
"=",
"filename",
"+",
"'_'",
"+",
"timestamp",
"shutil",
".",
"move",
"(",
"filename",
",",
"dst",
")"
] |
Move `filename` out of the way, archiving it by appending the current datetime
Can be a file or a directory
|
[
"Move",
"filename",
"out",
"of",
"the",
"way",
"archiving",
"it",
"by",
"appending",
"the",
"current",
"datetime",
"Can",
"be",
"a",
"file",
"or",
"a",
"directory"
] |
1b4a4175381d2175592208619315f399610f915c
|
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/fileutil.py#L45-L55
|
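A quick usage sketch (illustrative; assumes `utilkit` is importable and that an `output` directory may already exist):

import os
from utilkit.fileutil import archive_if_exists

archive_if_exists('output')   # if present, 'output' is moved to 'output_<ISO timestamp>'
os.makedirs('output')         # the name is now free, so the directory can be recreated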
240,415
|
aquatix/python-utilkit
|
utilkit/fileutil.py
|
list_files
|
def list_files(dirname, extension=None):
"""
List all files in directory `dirname`, option to filter on file extension
"""
f = []
for (dirpath, dirnames, filenames) in os.walk(dirname):
f.extend(filenames)
break
if extension is not None:
# Filter on extension
filtered = []
for filename in f:
fn, ext = os.path.splitext(filename)
if ext.lower() == '.' + extension.lower():
filtered.append(filename)
f = filtered
return f
|
python
|
def list_files(dirname, extension=None):
"""
List all files in directory `dirname`, option to filter on file extension
"""
f = []
for (dirpath, dirnames, filenames) in os.walk(dirname):
f.extend(filenames)
break
if extension is not None:
# Filter on extension
filtered = []
for filename in f:
fn, ext = os.path.splitext(filename)
if ext.lower() == '.' + extension.lower():
filtered.append(filename)
f = filtered
return f
|
[
"def",
"list_files",
"(",
"dirname",
",",
"extension",
"=",
"None",
")",
":",
"f",
"=",
"[",
"]",
"for",
"(",
"dirpath",
",",
"dirnames",
",",
"filenames",
")",
"in",
"os",
".",
"walk",
"(",
"dirname",
")",
":",
"f",
".",
"extend",
"(",
"filenames",
")",
"break",
"if",
"extension",
"is",
"not",
"None",
":",
"# Filter on extension",
"filtered",
"=",
"[",
"]",
"for",
"filename",
"in",
"f",
":",
"fn",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"ext",
".",
"lower",
"(",
")",
"==",
"'.'",
"+",
"extension",
".",
"lower",
"(",
")",
":",
"filtered",
".",
"append",
"(",
"filename",
")",
"f",
"=",
"filtered",
"return",
"f"
] |
List all files in directory `dirname`, option to filter on file extension
|
[
"List",
"all",
"files",
"in",
"directory",
"dirname",
"option",
"to",
"filter",
"on",
"file",
"extension"
] |
1b4a4175381d2175592208619315f399610f915c
|
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/fileutil.py#L72-L88
|
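A usage sketch (illustrative; assumes `utilkit` is importable):

from utilkit.fileutil import list_files

all_files = list_files('/var/log')        # only the top level is listed (os.walk is broken out of after one pass)
gz_files = list_files('/var/log', 'gz')   # extension filter is case-insensitive and given without the dot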
240,416
|
aquatix/python-utilkit
|
utilkit/fileutil.py
|
filename_addstring
|
def filename_addstring(filename, text):
"""
Add `text` to filename, keeping the extension in place
For example when adding a timestamp to the filename
"""
fn, ext = os.path.splitext(filename)
return fn + text + ext
|
python
|
def filename_addstring(filename, text):
"""
Add `text` to filename, keeping the extension in place
For example when adding a timestamp to the filename
"""
fn, ext = os.path.splitext(filename)
return fn + text + ext
|
[
"def",
"filename_addstring",
"(",
"filename",
",",
"text",
")",
":",
"fn",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"return",
"fn",
"+",
"text",
"+",
"ext"
] |
Add `text` to filename, keeping the extension in place
For example when adding a timestamp to the filename
|
[
"Add",
"text",
"to",
"filename",
"keeping",
"the",
"extension",
"in",
"place",
"For",
"example",
"when",
"adding",
"a",
"timestamp",
"to",
"the",
"filename"
] |
1b4a4175381d2175592208619315f399610f915c
|
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/fileutil.py#L91-L97
|
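A one-line usage sketch (illustrative):

from utilkit.fileutil import filename_addstring

filename_addstring('report.csv', '_2024-01-31')  # -> 'report_2024-01-31.csv', extension kept in place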
240,417
|
aquatix/python-utilkit
|
utilkit/fileutil.py
|
get_file_contents
|
def get_file_contents(filename):
"""
Read file contents from file `filename`
"""
data = None
try:
with open(filename) as pf:
data = pf.read()
except IOError:
# File not found, return None
pass
return data
|
python
|
def get_file_contents(filename):
"""
Read file contents from file `filename`
"""
data = None
try:
with open(filename) as pf:
data = pf.read()
except IOError:
# File not found, return None
pass
return data
|
[
"def",
"get_file_contents",
"(",
"filename",
")",
":",
"data",
"=",
"None",
"try",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"pf",
":",
"data",
"=",
"pf",
".",
"read",
"(",
")",
"except",
"IOError",
":",
"# File not found, return None",
"pass",
"return",
"data"
] |
Read file contents from file `filename`
|
[
"Read",
"file",
"contents",
"from",
"file",
"filename"
] |
1b4a4175381d2175592208619315f399610f915c
|
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/fileutil.py#L100-L111
|
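A usage sketch (illustrative; assumes `utilkit` is importable):

from utilkit.fileutil import get_file_contents

data = get_file_contents('settings.ini')
if data is None:
    # the helper swallows IOError, so a missing or unreadable file simply yields None
    data = ''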
240,418
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser.parse_name
|
def parse_name(cls, name):
""" Parses a name into a dictionary of identified subsections with accompanying information to
correctly identify and replace if necessary
:param name: str, string to be parsed
:return: dict, dictionary with relevant parsed information
"""
parse_dict = dict.fromkeys(cls.PARSABLE, None)
parse_dict['date'] = cls.get_date(name)
parse_dict['version'] = cls.get_version(name)
parse_dict['udim'] = cls.get_udim(name)
parse_dict['side'] = cls.get_side(name)
parse_dict['basename'] = cls.get_base_naive(cls._reduce_name(name, parse_dict))
return parse_dict
|
python
|
def parse_name(cls, name):
""" Parses a name into a dictionary of identified subsections with accompanying information to
correctly identify and replace if necessary
:param name: str, string to be parsed
:return: dict, dictionary with relevant parsed information
"""
parse_dict = dict.fromkeys(cls.PARSABLE, None)
parse_dict['date'] = cls.get_date(name)
parse_dict['version'] = cls.get_version(name)
parse_dict['udim'] = cls.get_udim(name)
parse_dict['side'] = cls.get_side(name)
parse_dict['basename'] = cls.get_base_naive(cls._reduce_name(name, parse_dict))
return parse_dict
|
[
"def",
"parse_name",
"(",
"cls",
",",
"name",
")",
":",
"parse_dict",
"=",
"dict",
".",
"fromkeys",
"(",
"cls",
".",
"PARSABLE",
",",
"None",
")",
"parse_dict",
"[",
"'date'",
"]",
"=",
"cls",
".",
"get_date",
"(",
"name",
")",
"parse_dict",
"[",
"'version'",
"]",
"=",
"cls",
".",
"get_version",
"(",
"name",
")",
"parse_dict",
"[",
"'udim'",
"]",
"=",
"cls",
".",
"get_udim",
"(",
"name",
")",
"parse_dict",
"[",
"'side'",
"]",
"=",
"cls",
".",
"get_side",
"(",
"name",
")",
"parse_dict",
"[",
"'basename'",
"]",
"=",
"cls",
".",
"get_base_naive",
"(",
"cls",
".",
"_reduce_name",
"(",
"name",
",",
"parse_dict",
")",
")",
"return",
"parse_dict"
] |
Parses a name into a dictionary of identified subsections with accompanying information to
correctly identify and replace if necessary
:param name: str, string to be parsed
:return: dict, dictionary with relevant parsed information
|
[
"Parses",
"a",
"name",
"into",
"a",
"dictionary",
"of",
"identified",
"subsections",
"with",
"accompanying",
"information",
"to",
"correctly",
"identify",
"and",
"replace",
"if",
"necessary"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L35-L48
|
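A hedged usage sketch for `NameParser.parse_name` (illustrative; the example name is hypothetical and the exact values detected depend on the Nomenclate configuration):

from nomenclate.core.nameparser import NameParser

result = NameParser.parse_name('char_lion_L_v003')
# `result` is a dict with at least the keys 'date', 'version', 'udim', 'side' and 'basename';
# any field that is not detected in the string remains None.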
240,419
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser.get_side
|
def get_side(cls, name, ignore=''):
""" Checks a string for a possible side string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found side to reduce duplicates.
We can be safe to assume the abbreviation for the side does not have camel casing within its own word.
:param name: str, string that represents a possible name of an object
:param name: str, string that represents a possible name of an object
:return: (None, str), either the found permutation of the side found in name or None
"""
for side in cls.CONFIG_SIDES:
""" Tried using a regex, however it would've taken too long to debug
side_regex = cls._build_abbreviation_regex(side)
result = cls._generic_search(name, side_regex, metadata={'side': side}, ignore=ignore)
if result:
return result
"""
for permutations in cls.get_string_camel_patterns(side):
for permutation in permutations:
result = cls._generic_search(name, permutation, metadata={'side': side}, ignore=ignore)
if result:
return result
return None
|
python
|
def get_side(cls, name, ignore=''):
""" Checks a string for a possible side string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found side to reduce duplicates.
We can be safe to assume the abbreviation for the side does not have camel casing within its own word.
:param name: str, string that represents a possible name of an object
    :param ignore: str, specific ignore string for the search to avoid
:return: (None, str), either the found permutation of the side found in name or None
"""
for side in cls.CONFIG_SIDES:
""" Tried using a regex, however it would've taken too long to debug
side_regex = cls._build_abbreviation_regex(side)
result = cls._generic_search(name, side_regex, metadata={'side': side}, ignore=ignore)
if result:
return result
"""
for permutations in cls.get_string_camel_patterns(side):
for permutation in permutations:
result = cls._generic_search(name, permutation, metadata={'side': side}, ignore=ignore)
if result:
return result
return None
|
[
"def",
"get_side",
"(",
"cls",
",",
"name",
",",
"ignore",
"=",
"''",
")",
":",
"for",
"side",
"in",
"cls",
".",
"CONFIG_SIDES",
":",
"\"\"\" Tried using a regex, however it would've taken too long to debug\n side_regex = cls._build_abbreviation_regex(side)\n result = cls._generic_search(name, side_regex, metadata={'side': side}, ignore=ignore)\n if result:\n return result\n \"\"\"",
"for",
"permutations",
"in",
"cls",
".",
"get_string_camel_patterns",
"(",
"side",
")",
":",
"for",
"permutation",
"in",
"permutations",
":",
"result",
"=",
"cls",
".",
"_generic_search",
"(",
"name",
",",
"permutation",
",",
"metadata",
"=",
"{",
"'side'",
":",
"side",
"}",
",",
"ignore",
"=",
"ignore",
")",
"if",
"result",
":",
"return",
"result",
"return",
"None"
] |
Checks a string for a possible side string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found side to reduce duplicates.
We can be safe to assume the abbreviation for the side does not have camel casing within its own word.
:param name: str, string that represents a possible name of an object
:param ignore: str, specific ignore string for the search to avoid
:return: (None, str), either the found permutation of the side found in name or None
|
[
"Checks",
"a",
"string",
"for",
"a",
"possible",
"side",
"string",
"token",
"this",
"assumes",
"its",
"on",
"its",
"own",
"and",
"is",
"not",
"part",
"of",
"or",
"camel",
"cased",
"and",
"combined",
"with",
"a",
"word",
".",
"Returns",
"first",
"found",
"side",
"to",
"reduce",
"duplicates",
".",
"We",
"can",
"be",
"safe",
"to",
"assume",
"the",
"abbreviation",
"for",
"the",
"side",
"does",
"not",
"have",
"camel",
"casing",
"within",
"its",
"own",
"word",
"."
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L51-L72
|
240,420
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser.get_discipline
|
def get_discipline(cls, name, ignore='', min_length=3):
""" Checks a string for a possible discipline string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found match to reduce duplicates.
We can be safe to assume the abbreviation for the discipline does not have camel casing within its own word.
:param name: str, the string based object name
:param ignore: str, specific ignore string for the search to avoid
:param min_length: int, minimum length for possible abbreviations of disciplines. Lower = more wrong guesses.
:return: dict, match dictionary
"""
for discipline in cls.CONFIG_DISCIPLINES:
re_abbr = '({RECURSE}(?=[0-9]|[A-Z]|{SEPARATORS}))'.format(
RECURSE=cls._build_abbreviation_regex(discipline),
SEPARATORS=cls.REGEX_SEPARATORS)
matches = cls._get_regex_search(name, re_abbr, ignore=ignore)
if matches:
matches = [m for m in matches if
re.findall('([a-z]{%d,})' % min_length, m['match'], flags=re.IGNORECASE)]
if matches:
return matches[-1]
return None
|
python
|
def get_discipline(cls, name, ignore='', min_length=3):
""" Checks a string for a possible discipline string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found match to reduce duplicates.
We can be safe to assume the abbreviation for the discipline does not have camel casing within its own word.
:param name: str, the string based object name
:param ignore: str, specific ignore string for the search to avoid
:param min_length: int, minimum length for possible abbreviations of disciplines. Lower = more wrong guesses.
:return: dict, match dictionary
"""
for discipline in cls.CONFIG_DISCIPLINES:
re_abbr = '({RECURSE}(?=[0-9]|[A-Z]|{SEPARATORS}))'.format(
RECURSE=cls._build_abbreviation_regex(discipline),
SEPARATORS=cls.REGEX_SEPARATORS)
matches = cls._get_regex_search(name, re_abbr, ignore=ignore)
if matches:
matches = [m for m in matches if
re.findall('([a-z]{%d,})' % min_length, m['match'], flags=re.IGNORECASE)]
if matches:
return matches[-1]
return None
|
[
"def",
"get_discipline",
"(",
"cls",
",",
"name",
",",
"ignore",
"=",
"''",
",",
"min_length",
"=",
"3",
")",
":",
"for",
"discipline",
"in",
"cls",
".",
"CONFIG_DISCIPLINES",
":",
"re_abbr",
"=",
"'({RECURSE}(?=[0-9]|[A-Z]|{SEPARATORS}))'",
".",
"format",
"(",
"RECURSE",
"=",
"cls",
".",
"_build_abbreviation_regex",
"(",
"discipline",
")",
",",
"SEPARATORS",
"=",
"cls",
".",
"REGEX_SEPARATORS",
")",
"matches",
"=",
"cls",
".",
"_get_regex_search",
"(",
"name",
",",
"re_abbr",
",",
"ignore",
"=",
"ignore",
")",
"if",
"matches",
":",
"matches",
"=",
"[",
"m",
"for",
"m",
"in",
"matches",
"if",
"re",
".",
"findall",
"(",
"'([a-z]{%d,})'",
"%",
"min_length",
",",
"m",
"[",
"'match'",
"]",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"]",
"if",
"matches",
":",
"return",
"matches",
"[",
"-",
"1",
"]",
"return",
"None"
] |
Checks a string for a possible discipline string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found match to reduce duplicates.
We can be safe to assume the abbreviation for the discipline does not have camel casing within its own word.
:param name: str, the string based object name
:param ignore: str, specific ignore string for the search to avoid
:param min_length: int, minimum length for possible abbreviations of disciplines. Lower = more wrong guesses.
:return: dict, match dictionary
|
[
"Checks",
"a",
"string",
"for",
"a",
"possible",
"discipline",
"string",
"token",
"this",
"assumes",
"its",
"on",
"its",
"own",
"and",
"is",
"not",
"part",
"of",
"or",
"camel",
"cased",
"and",
"combined",
"with",
"a",
"word",
".",
"Returns",
"first",
"found",
"match",
"to",
"reduce",
"duplicates",
".",
"We",
"can",
"be",
"safe",
"to",
"assume",
"the",
"abbreviation",
"for",
"the",
"discipline",
"does",
"not",
"have",
"camel",
"casing",
"within",
"its",
"own",
"word",
"."
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L75-L95
|
240,421
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser.get_string_camel_patterns
|
def get_string_camel_patterns(cls, name, min_length=0):
""" Finds all permutations of possible camel casing of the given name
:param name: str, the name we need to get all possible permutations and abbreviations for
:param min_length: int, minimum length we want for abbreviations
:return: list(list(str)), list casing permutations of list of abbreviations
"""
# Have to check for longest first and remove duplicates
patterns = []
abbreviations = list(set(cls._get_abbreviations(name, output_length=min_length)))
abbreviations.sort(key=len, reverse=True)
for abbr in abbreviations:
# We won't check for abbreviations that are stupid eg something with apparent camel casing within
# the word itself like LeF, sorting from:
# http://stackoverflow.com/questions/13954841/python-sort-upper-case-and-lower-case
casing_permutations = list(set(cls._get_casing_permutations(abbr)))
casing_permutations.sort(key=lambda v: (v.upper(), v[0].islower(), len(v)))
permutations = [permutation for permutation in casing_permutations if
cls.is_valid_camel(permutation) or len(permutation) <= 2]
if permutations:
patterns.append(permutations)
return patterns
|
python
|
def get_string_camel_patterns(cls, name, min_length=0):
""" Finds all permutations of possible camel casing of the given name
:param name: str, the name we need to get all possible permutations and abbreviations for
:param min_length: int, minimum length we want for abbreviations
:return: list(list(str)), list casing permutations of list of abbreviations
"""
# Have to check for longest first and remove duplicates
patterns = []
abbreviations = list(set(cls._get_abbreviations(name, output_length=min_length)))
abbreviations.sort(key=len, reverse=True)
for abbr in abbreviations:
# We won't check for abbreviations that are stupid eg something with apparent camel casing within
# the word itself like LeF, sorting from:
# http://stackoverflow.com/questions/13954841/python-sort-upper-case-and-lower-case
casing_permutations = list(set(cls._get_casing_permutations(abbr)))
casing_permutations.sort(key=lambda v: (v.upper(), v[0].islower(), len(v)))
permutations = [permutation for permutation in casing_permutations if
cls.is_valid_camel(permutation) or len(permutation) <= 2]
if permutations:
patterns.append(permutations)
return patterns
|
[
"def",
"get_string_camel_patterns",
"(",
"cls",
",",
"name",
",",
"min_length",
"=",
"0",
")",
":",
"# Have to check for longest first and remove duplicates",
"patterns",
"=",
"[",
"]",
"abbreviations",
"=",
"list",
"(",
"set",
"(",
"cls",
".",
"_get_abbreviations",
"(",
"name",
",",
"output_length",
"=",
"min_length",
")",
")",
")",
"abbreviations",
".",
"sort",
"(",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"for",
"abbr",
"in",
"abbreviations",
":",
"# We won't check for abbreviations that are stupid eg something with apparent camel casing within",
"# the word itself like LeF, sorting from:",
"# http://stackoverflow.com/questions/13954841/python-sort-upper-case-and-lower-case",
"casing_permutations",
"=",
"list",
"(",
"set",
"(",
"cls",
".",
"_get_casing_permutations",
"(",
"abbr",
")",
")",
")",
"casing_permutations",
".",
"sort",
"(",
"key",
"=",
"lambda",
"v",
":",
"(",
"v",
".",
"upper",
"(",
")",
",",
"v",
"[",
"0",
"]",
".",
"islower",
"(",
")",
",",
"len",
"(",
"v",
")",
")",
")",
"permutations",
"=",
"[",
"permutation",
"for",
"permutation",
"in",
"casing_permutations",
"if",
"cls",
".",
"is_valid_camel",
"(",
"permutation",
")",
"or",
"len",
"(",
"permutation",
")",
"<=",
"2",
"]",
"if",
"permutations",
":",
"patterns",
".",
"append",
"(",
"permutations",
")",
"return",
"patterns"
] |
Finds all permutations of possible camel casing of the given name
:param name: str, the name we need to get all possible permutations and abbreviations for
:param min_length: int, minimum length we want for abbreviations
:return: list(list(str)), list casing permutations of list of abbreviations
|
[
"Finds",
"all",
"permutations",
"of",
"possible",
"camel",
"casing",
"of",
"the",
"given",
"name"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L238-L261
|
240,422
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser._reduce_name
|
def _reduce_name(cls, name, parse_dict):
""" Reduces a name against matches found in a parse dictionary
:param name: str, name to be reduced
:param parse_dict: dict, dictionary of matches to reduce against
:return: str, reduced string
"""
# Now remove all found entries to make basename regex have an easier time
removal_indices = []
for section, match in iteritems(parse_dict):
try:
matches = []
if isinstance(match, dict) and 'compound_matches' in match:
matches = match.get('compound_matches')
elif not isinstance(match, list) and match is not None:
matches = [match]
for m in matches:
valid_slice = True
slice_a, slice_b = m.get('position')
# Adjust slice positions from previous slices
if removal_indices is []:
removal_indices.append((slice_a, slice_b))
for r_slice_a, r_slice_b in removal_indices:
if slice_a == r_slice_a and slice_b == r_slice_b:
valid_slice = False
if slice_a > r_slice_a or slice_a > r_slice_b or slice_b > r_slice_b or slice_b > r_slice_a:
slice_delta = r_slice_b - r_slice_a
slice_a -= slice_delta
slice_b -= slice_delta
if valid_slice:
name = cls._string_remove_slice(name, slice_a, slice_b)
removal_indices.append((slice_a, slice_b))
except (IndexError, TypeError):
pass
return name
|
python
|
def _reduce_name(cls, name, parse_dict):
""" Reduces a name against matches found in a parse dictionary
:param name: str, name to be reduced
:param parse_dict: dict, dictionary of matches to reduce against
:return: str, reduced string
"""
# Now remove all found entries to make basename regex have an easier time
removal_indices = []
for section, match in iteritems(parse_dict):
try:
matches = []
if isinstance(match, dict) and 'compound_matches' in match:
matches = match.get('compound_matches')
elif not isinstance(match, list) and match is not None:
matches = [match]
for m in matches:
valid_slice = True
slice_a, slice_b = m.get('position')
# Adjust slice positions from previous slices
if removal_indices is []:
removal_indices.append((slice_a, slice_b))
for r_slice_a, r_slice_b in removal_indices:
if slice_a == r_slice_a and slice_b == r_slice_b:
valid_slice = False
if slice_a > r_slice_a or slice_a > r_slice_b or slice_b > r_slice_b or slice_b > r_slice_a:
slice_delta = r_slice_b - r_slice_a
slice_a -= slice_delta
slice_b -= slice_delta
if valid_slice:
name = cls._string_remove_slice(name, slice_a, slice_b)
removal_indices.append((slice_a, slice_b))
except (IndexError, TypeError):
pass
return name
|
[
"def",
"_reduce_name",
"(",
"cls",
",",
"name",
",",
"parse_dict",
")",
":",
"# Now remove all found entries to make basename regex have an easier time",
"removal_indices",
"=",
"[",
"]",
"for",
"section",
",",
"match",
"in",
"iteritems",
"(",
"parse_dict",
")",
":",
"try",
":",
"matches",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"match",
",",
"dict",
")",
"and",
"'compound_matches'",
"in",
"match",
":",
"matches",
"=",
"match",
".",
"get",
"(",
"'compound_matches'",
")",
"elif",
"not",
"isinstance",
"(",
"match",
",",
"list",
")",
"and",
"match",
"is",
"not",
"None",
":",
"matches",
"=",
"[",
"match",
"]",
"for",
"m",
"in",
"matches",
":",
"valid_slice",
"=",
"True",
"slice_a",
",",
"slice_b",
"=",
"m",
".",
"get",
"(",
"'position'",
")",
"# Adjust slice positions from previous slices",
"if",
"removal_indices",
"is",
"[",
"]",
":",
"removal_indices",
".",
"append",
"(",
"(",
"slice_a",
",",
"slice_b",
")",
")",
"for",
"r_slice_a",
",",
"r_slice_b",
"in",
"removal_indices",
":",
"if",
"slice_a",
"==",
"r_slice_a",
"and",
"slice_b",
"==",
"r_slice_b",
":",
"valid_slice",
"=",
"False",
"if",
"slice_a",
">",
"r_slice_a",
"or",
"slice_a",
">",
"r_slice_b",
"or",
"slice_b",
">",
"r_slice_b",
"or",
"slice_b",
">",
"r_slice_a",
":",
"slice_delta",
"=",
"r_slice_b",
"-",
"r_slice_a",
"slice_a",
"-=",
"slice_delta",
"slice_b",
"-=",
"slice_delta",
"if",
"valid_slice",
":",
"name",
"=",
"cls",
".",
"_string_remove_slice",
"(",
"name",
",",
"slice_a",
",",
"slice_b",
")",
"removal_indices",
".",
"append",
"(",
"(",
"slice_a",
",",
"slice_b",
")",
")",
"except",
"(",
"IndexError",
",",
"TypeError",
")",
":",
"pass",
"return",
"name"
] |
Reduces a name against matches found in a parse dictionary
:param name: str, name to be reduced
:param parse_dict: dict, dictionary of matches to reduce against
:return: str, reduced string
|
[
"Reduces",
"a",
"name",
"against",
"matches",
"found",
"in",
"a",
"parse",
"dictionary"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L264-L301
|
240,423
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser._get_regex_search
|
def _get_regex_search(input_string, regex, metadata={}, match_index=None, ignore='', flags=0):
""" Using this so that all results from the functions return similar results
:param input_string: str, input string to be checked
:param regex: str, input regex to be compiled and searched with
:param match_index: (int, None), whether to get a specific match, if None returns all matches as list
:param metadata: dict, dictionary of extra meta tags needed to identify information
:return: list(dict), list of dictionaries if multiple hits or a specific entry or None
"""
generator = re.compile(regex, flags=flags).finditer(input_string)
matches = []
for obj in generator:
try:
span_a = obj.span(1)
group_a = obj.group(1)
except IndexError:
span_a = obj.span()
group_a = obj.group()
if obj.groups() == ('',):
# Not sure how to account for this situation yet, weird regex.
return True
if group_a not in ignore:
matches.append({'pattern': regex,
'input': input_string,
'position': span_a,
'position_full': obj.span(),
'match': group_a,
'match_full': obj.group()})
if matches:
for match in matches:
match.update(metadata)
if match_index is not None:
return matches[match_index]
return matches
return None
|
python
|
def _get_regex_search(input_string, regex, metadata={}, match_index=None, ignore='', flags=0):
""" Using this so that all results from the functions return similar results
:param input_string: str, input string to be checked
:param regex: str, input regex to be compiled and searched with
:param match_index: (int, None), whether to get a specific match, if None returns all matches as list
:param metadata: dict, dictionary of extra meta tags needed to identify information
:return: list(dict), list of dictionaries if multiple hits or a specific entry or None
"""
generator = re.compile(regex, flags=flags).finditer(input_string)
matches = []
for obj in generator:
try:
span_a = obj.span(1)
group_a = obj.group(1)
except IndexError:
span_a = obj.span()
group_a = obj.group()
if obj.groups() == ('',):
# Not sure how to account for this situation yet, weird regex.
return True
if group_a not in ignore:
matches.append({'pattern': regex,
'input': input_string,
'position': span_a,
'position_full': obj.span(),
'match': group_a,
'match_full': obj.group()})
if matches:
for match in matches:
match.update(metadata)
if match_index is not None:
return matches[match_index]
return matches
return None
|
[
"def",
"_get_regex_search",
"(",
"input_string",
",",
"regex",
",",
"metadata",
"=",
"{",
"}",
",",
"match_index",
"=",
"None",
",",
"ignore",
"=",
"''",
",",
"flags",
"=",
"0",
")",
":",
"generator",
"=",
"re",
".",
"compile",
"(",
"regex",
",",
"flags",
"=",
"flags",
")",
".",
"finditer",
"(",
"input_string",
")",
"matches",
"=",
"[",
"]",
"for",
"obj",
"in",
"generator",
":",
"try",
":",
"span_a",
"=",
"obj",
".",
"span",
"(",
"1",
")",
"group_a",
"=",
"obj",
".",
"group",
"(",
"1",
")",
"except",
"IndexError",
":",
"span_a",
"=",
"obj",
".",
"span",
"(",
")",
"group_a",
"=",
"obj",
".",
"group",
"(",
")",
"if",
"obj",
".",
"groups",
"(",
")",
"==",
"(",
"''",
",",
")",
":",
"# Not sure how to account for this situation yet, weird regex.",
"return",
"True",
"if",
"group_a",
"not",
"in",
"ignore",
":",
"matches",
".",
"append",
"(",
"{",
"'pattern'",
":",
"regex",
",",
"'input'",
":",
"input_string",
",",
"'position'",
":",
"span_a",
",",
"'position_full'",
":",
"obj",
".",
"span",
"(",
")",
",",
"'match'",
":",
"group_a",
",",
"'match_full'",
":",
"obj",
".",
"group",
"(",
")",
"}",
")",
"if",
"matches",
":",
"for",
"match",
"in",
"matches",
":",
"match",
".",
"update",
"(",
"metadata",
")",
"if",
"match_index",
"is",
"not",
"None",
":",
"return",
"matches",
"[",
"match_index",
"]",
"return",
"matches",
"return",
"None"
] |
Using this so that all results from the functions return similar results
:param input_string: str, input string to be checked
:param regex: str, input regex to be compiled and searched with
:param match_index: (int, None), whether to get a specific match, if None returns all matches as list
:param metadata: dict, dictionary of extra meta tags needed to identify information
:return: list(dict), list of dictionaries if multiple hits or a specific entry or None
|
[
"Using",
"this",
"so",
"that",
"all",
"results",
"from",
"the",
"functions",
"return",
"similar",
"results"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L304-L341
|
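The static method above standardises every regex hit into a small dictionary. The sketch below is a stripped-down, self-contained restatement of that idea (the helper name find_matches is hypothetical, and it omits the ignore/match_index/metadata handling) so the shape of the returned records is easy to see.

import re

def find_matches(input_string, regex):
    # Wrap every hit from re.finditer in a uniform dict, as _get_regex_search does
    matches = []
    for obj in re.compile(regex).finditer(input_string):
        matches.append({'pattern': regex,
                        'input': input_string,
                        'position': obj.span(),
                        'match': obj.group()})
    return matches or None

print(find_matches('arm_geo_v001', r'v\d+'))
# [{'pattern': 'v\\d+', 'input': 'arm_geo_v001', 'position': (8, 12), 'match': 'v001'}]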
240,424
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser._generic_search
|
def _generic_search(cls, name, search_string, metadata={}, ignore=''):
""" Searches for a specific string given three types of regex search types. Also auto-checks for camel casing.
:param name: str, name of object in question
:param search_string: str, string to find and insert into the search regexes
:param metadata: dict, metadata to add to the result if we find a match
:param ignore: str, ignore specific string for the search
:return: dict, dictionary of search results
"""
patterns = [cls.REGEX_ABBR_SEOS,
cls.REGEX_ABBR_ISLAND,
cls.REGEX_ABBR_CAMEL]
if not search_string[0].isupper():
patterns.remove(cls.REGEX_ABBR_CAMEL)
for pattern in patterns:
search_result = cls._get_regex_search(name,
pattern.format(ABBR=search_string, SEP=cls.REGEX_SEPARATORS),
metadata=metadata,
match_index=0,
ignore=ignore)
if search_result is not None:
if cls.is_valid_camel(search_result.get('match_full'), strcmp=search_result.get('match')):
return search_result
return None
|
python
|
def _generic_search(cls, name, search_string, metadata={}, ignore=''):
""" Searches for a specific string given three types of regex search types. Also auto-checks for camel casing.
:param name: str, name of object in question
:param search_string: str, string to find and insert into the search regexes
:param metadata: dict, metadata to add to the result if we find a match
:param ignore: str, ignore specific string for the search
:return: dict, dictionary of search results
"""
patterns = [cls.REGEX_ABBR_SEOS,
cls.REGEX_ABBR_ISLAND,
cls.REGEX_ABBR_CAMEL]
if not search_string[0].isupper():
patterns.remove(cls.REGEX_ABBR_CAMEL)
for pattern in patterns:
search_result = cls._get_regex_search(name,
pattern.format(ABBR=search_string, SEP=cls.REGEX_SEPARATORS),
metadata=metadata,
match_index=0,
ignore=ignore)
if search_result is not None:
if cls.is_valid_camel(search_result.get('match_full'), strcmp=search_result.get('match')):
return search_result
return None
|
[
"def",
"_generic_search",
"(",
"cls",
",",
"name",
",",
"search_string",
",",
"metadata",
"=",
"{",
"}",
",",
"ignore",
"=",
"''",
")",
":",
"patterns",
"=",
"[",
"cls",
".",
"REGEX_ABBR_SEOS",
",",
"cls",
".",
"REGEX_ABBR_ISLAND",
",",
"cls",
".",
"REGEX_ABBR_CAMEL",
"]",
"if",
"not",
"search_string",
"[",
"0",
"]",
".",
"isupper",
"(",
")",
":",
"patterns",
".",
"remove",
"(",
"cls",
".",
"REGEX_ABBR_CAMEL",
")",
"for",
"pattern",
"in",
"patterns",
":",
"search_result",
"=",
"cls",
".",
"_get_regex_search",
"(",
"name",
",",
"pattern",
".",
"format",
"(",
"ABBR",
"=",
"search_string",
",",
"SEP",
"=",
"cls",
".",
"REGEX_SEPARATORS",
")",
",",
"metadata",
"=",
"metadata",
",",
"match_index",
"=",
"0",
",",
"ignore",
"=",
"ignore",
")",
"if",
"search_result",
"is",
"not",
"None",
":",
"if",
"cls",
".",
"is_valid_camel",
"(",
"search_result",
".",
"get",
"(",
"'match_full'",
")",
",",
"strcmp",
"=",
"search_result",
".",
"get",
"(",
"'match'",
")",
")",
":",
"return",
"search_result",
"return",
"None"
] |
Searches for a specific string given three types of regex search types. Also auto-checks for camel casing.
:param name: str, name of object in question
:param search_string: str, string to find and insert into the search regexes
:param metadata: dict, metadata to add to the result if we find a match
:param ignore: str, ignore specific string for the search
:return: dict, dictionary of search results
|
[
"Searches",
"for",
"a",
"specific",
"string",
"given",
"three",
"types",
"of",
"regex",
"search",
"types",
".",
"Also",
"auto",
"-",
"checks",
"for",
"camel",
"casing",
"."
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L344-L369
|
240,425
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser._get_abbreviations
|
def _get_abbreviations(input_string, output_length=0):
""" Generates abbreviations for input_string
:param input_string: str, name of object
:param output_length: int, optional specific length of abbreviations, default is off
:return: list(str), list of all combinations that include the first letter (possible abbreviations)
"""
for i, j in itertools.combinations(range(len(input_string[1:]) + 1), 2):
abbr = input_string[0] + input_string[1:][i:j]
if len(abbr) >= output_length:
yield abbr
elif output_length == 0:
yield abbr
# Have to add the solitary letter as well
if not output_length or output_length == 1:
yield input_string[0]
|
python
|
def _get_abbreviations(input_string, output_length=0):
""" Generates abbreviations for input_string
:param input_string: str, name of object
:param output_length: int, optional specific length of abbreviations, default is off
:return: list(str), list of all combinations that include the first letter (possible abbreviations)
"""
for i, j in itertools.combinations(range(len(input_string[1:]) + 1), 2):
abbr = input_string[0] + input_string[1:][i:j]
if len(abbr) >= output_length:
yield abbr
elif output_length == 0:
yield abbr
# Have to add the solitary letter as well
if not output_length or output_length == 1:
yield input_string[0]
|
[
"def",
"_get_abbreviations",
"(",
"input_string",
",",
"output_length",
"=",
"0",
")",
":",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"combinations",
"(",
"range",
"(",
"len",
"(",
"input_string",
"[",
"1",
":",
"]",
")",
"+",
"1",
")",
",",
"2",
")",
":",
"abbr",
"=",
"input_string",
"[",
"0",
"]",
"+",
"input_string",
"[",
"1",
":",
"]",
"[",
"i",
":",
"j",
"]",
"if",
"len",
"(",
"abbr",
")",
">=",
"output_length",
":",
"yield",
"abbr",
"elif",
"output_length",
"==",
"0",
":",
"yield",
"abbr",
"# Have to add the solitary letter as well",
"if",
"not",
"output_length",
"or",
"output_length",
"==",
"1",
":",
"yield",
"input_string",
"[",
"0",
"]"
] |
Generates abbreviations for input_string
:param input_string: str, name of object
:param output_length: int, optional specific length of abbreviations, default is off
:return: list(str), list of all combinations that include the first letter (possible abbreviations)
|
[
"Generates",
"abbreviations",
"for",
"input_string"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L372-L387
|
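To make the combinatorics above concrete, here is the same generator body copied into a standalone function (the name abbreviations is hypothetical), together with its output for a short word.

import itertools

def abbreviations(input_string, output_length=0):
    # Every substring of the tail, always prefixed with the first letter
    for i, j in itertools.combinations(range(len(input_string[1:]) + 1), 2):
        abbr = input_string[0] + input_string[1:][i:j]
        if len(abbr) >= output_length:
            yield abbr
        elif output_length == 0:
            yield abbr
    # Plus the solitary first letter
    if not output_length or output_length == 1:
        yield input_string[0]

print(list(abbreviations('left')))
# ['le', 'lef', 'left', 'lf', 'lft', 'lt', 'l']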
240,426
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser._get_casing_permutations
|
def _get_casing_permutations(cls, input_string):
""" Takes a string and gives all possible permutations of casing for comparative purposes
:param input_string: str, name of object
:return: Generator(str), iterator of all possible permutations of casing for the input_string
"""
if not input_string:
yield ""
else:
first = input_string[:1]
for sub_casing in cls._get_casing_permutations(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
|
python
|
def _get_casing_permutations(cls, input_string):
""" Takes a string and gives all possible permutations of casing for comparative purposes
:param input_string: str, name of object
:return: Generator(str), iterator of all possible permutations of casing for the input_string
"""
if not input_string:
yield ""
else:
first = input_string[:1]
for sub_casing in cls._get_casing_permutations(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
|
[
"def",
"_get_casing_permutations",
"(",
"cls",
",",
"input_string",
")",
":",
"if",
"not",
"input_string",
":",
"yield",
"\"\"",
"else",
":",
"first",
"=",
"input_string",
"[",
":",
"1",
"]",
"for",
"sub_casing",
"in",
"cls",
".",
"_get_casing_permutations",
"(",
"input_string",
"[",
"1",
":",
"]",
")",
":",
"yield",
"first",
".",
"lower",
"(",
")",
"+",
"sub_casing",
"yield",
"first",
".",
"upper",
"(",
")",
"+",
"sub_casing"
] |
Takes a string and gives all possible permutations of casing for comparative purposes
:param input_string: str, name of object
:return: Generator(str), iterator of all possible permutations of casing for the input_string
|
[
"Takes",
"a",
"string",
"and",
"gives",
"all",
"possible",
"permutations",
"of",
"casing",
"for",
"comparative",
"purposes"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L433-L445
|
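A standalone copy of the recursion above (renamed casing_permutations for the sketch), showing that a string of length n yields 2**n casings, lowercase-first at every position.

def casing_permutations(input_string):
    # Recurse on the tail, then emit the head in lower and upper case
    if not input_string:
        yield ""
    else:
        first = input_string[:1]
        for sub_casing in casing_permutations(input_string[1:]):
            yield first.lower() + sub_casing
            yield first.upper() + sub_casing

print(list(casing_permutations('lt')))
# ['lt', 'Lt', 'lT', 'LT']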
240,427
|
AndresMWeber/Nomenclate
|
nomenclate/core/nameparser.py
|
NameParser._string_remove_slice
|
def _string_remove_slice(input_str, start, end):
""" Removes portions of a string
:param input_str: str, input string
    :param start: int, start search index
    :param end: int, end search index
:return: str, the cut string
"""
if 0 <= start < end <= len(input_str):
return input_str[:start] + input_str[end:]
return input_str
|
python
|
def _string_remove_slice(input_str, start, end):
""" Removes portions of a string
:param input_str: str, input string
    :param start: int, start search index
    :param end: int, end search index
:return: str, the cut string
"""
if 0 <= start < end <= len(input_str):
return input_str[:start] + input_str[end:]
return input_str
|
[
"def",
"_string_remove_slice",
"(",
"input_str",
",",
"start",
",",
"end",
")",
":",
"if",
"0",
"<=",
"start",
"<",
"end",
"<=",
"len",
"(",
"input_str",
")",
":",
"return",
"input_str",
"[",
":",
"start",
"]",
"+",
"input_str",
"[",
"end",
":",
"]",
"return",
"input_str"
] |
Removes portions of a string
:param input_str: str, input string
:param start: int, start search index
:param end: int, end search index
:return: str, the cut string
|
[
"Removes",
"portions",
"of",
"a",
"string"
] |
e6d6fc28beac042bad588e56fbe77531d2de6b6f
|
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L448-L458
|
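A quick check of the slice-removal helper above, copied to module level for the sketch; note that the guard means out-of-range or inverted indices leave the string untouched.

def string_remove_slice(input_str, start, end):
    # Same bounds check as the static method above
    if 0 <= start < end <= len(input_str):
        return input_str[:start] + input_str[end:]
    return input_str

print(string_remove_slice('arm_geo_v001', 8, 12))   # 'arm_geo_'
print(string_remove_slice('arm_geo_v001', 5, 20))   # 'arm_geo_v001' (end past the string)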
240,428
|
noobermin/lspreader
|
lspreader/flds.py
|
getvector
|
def getvector(d,s):
'''
Get a vector flds data.
Parameters:
-----------
d -- flds data.
s -- key for the data.
'''
return np.array([d[s+"x"],d[s+"y"],d[s+"z"]]);
|
python
|
def getvector(d,s):
'''
Get a vector flds data.
Parameters:
-----------
d -- flds data.
s -- key for the data.
'''
return np.array([d[s+"x"],d[s+"y"],d[s+"z"]]);
|
[
"def",
"getvector",
"(",
"d",
",",
"s",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"d",
"[",
"s",
"+",
"\"x\"",
"]",
",",
"d",
"[",
"s",
"+",
"\"y\"",
"]",
",",
"d",
"[",
"s",
"+",
"\"z\"",
"]",
"]",
")"
] |
Get a vector flds data.
Parameters:
-----------
d -- flds data.
s -- key for the data.
|
[
"Get",
"a",
"vector",
"flds",
"data",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/flds.py#L11-L21
|
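A small illustration of getvector above with a hand-built dictionary standing in for real flds data; the 'E' prefix and the array contents are made up for the example, and the import assumes lspreader is installed from the module path in the record.

import numpy as np
from lspreader.flds import getvector  # assumes lspreader is installed

# Fake flds dict: the three Cartesian components of an 'E' field
d = {'Ex': np.zeros(3), 'Ey': np.ones(3), 'Ez': np.full(3, 2.0)}
E = getvector(d, 'E')
print(E.shape)   # (3, 3): one row per component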
240,429
|
noobermin/lspreader
|
lspreader/flds.py
|
restrict
|
def restrict(d,restrict):
'''
Restrict data by indices.
Parameters:
----------
d -- the flds/sclr data
    restrict -- a tuple of [xmin,xmax,...] etc
'''
notqs = ['t','xs','ys','zs','fd','sd']
keys = [k for k in d if k not in notqs];
if len(restrict) == 2:
for k in keys:
d[k] = d[k][restrict[0]:restrict[1]]
elif len(restrict) == 4:
for k in keys:
d[k] = d[k][
restrict[0]:restrict[1],
restrict[2]:restrict[3]
];
elif len(restrict) == 6:
for k in keys:
d[k] = d[k][
restrict[0]:restrict[1],
restrict[2]:restrict[3],
restrict[4]:restrict[5]
];
else:
raise ValueError("restrict of length {} is not valid".format(
len(restrict)));
|
python
|
def restrict(d,restrict):
'''
Restrict data by indices.
Parameters:
----------
d -- the flds/sclr data
    restrict -- a tuple of [xmin,xmax,...] etc
'''
notqs = ['t','xs','ys','zs','fd','sd']
keys = [k for k in d if k not in notqs];
if len(restrict) == 2:
for k in keys:
d[k] = d[k][restrict[0]:restrict[1]]
elif len(restrict) == 4:
for k in keys:
d[k] = d[k][
restrict[0]:restrict[1],
restrict[2]:restrict[3]
];
elif len(restrict) == 6:
for k in keys:
d[k] = d[k][
restrict[0]:restrict[1],
restrict[2]:restrict[3],
restrict[4]:restrict[5]
];
else:
raise ValueError("restrict of length {} is not valid".format(
len(restrict)));
|
[
"def",
"restrict",
"(",
"d",
",",
"restrict",
")",
":",
"notqs",
"=",
"[",
"'t'",
",",
"'xs'",
",",
"'ys'",
",",
"'zs'",
",",
"'fd'",
",",
"'sd'",
"]",
"keys",
"=",
"[",
"k",
"for",
"k",
"in",
"d",
"if",
"k",
"not",
"in",
"notqs",
"]",
"if",
"len",
"(",
"restrict",
")",
"==",
"2",
":",
"for",
"k",
"in",
"keys",
":",
"d",
"[",
"k",
"]",
"=",
"d",
"[",
"k",
"]",
"[",
"restrict",
"[",
"0",
"]",
":",
"restrict",
"[",
"1",
"]",
"]",
"elif",
"len",
"(",
"restrict",
")",
"==",
"4",
":",
"for",
"k",
"in",
"keys",
":",
"d",
"[",
"k",
"]",
"=",
"d",
"[",
"k",
"]",
"[",
"restrict",
"[",
"0",
"]",
":",
"restrict",
"[",
"1",
"]",
",",
"restrict",
"[",
"2",
"]",
":",
"restrict",
"[",
"3",
"]",
"]",
"elif",
"len",
"(",
"restrict",
")",
"==",
"6",
":",
"for",
"k",
"in",
"keys",
":",
"d",
"[",
"k",
"]",
"=",
"d",
"[",
"k",
"]",
"[",
"restrict",
"[",
"0",
"]",
":",
"restrict",
"[",
"1",
"]",
",",
"restrict",
"[",
"2",
"]",
":",
"restrict",
"[",
"3",
"]",
",",
"restrict",
"[",
"4",
"]",
":",
"restrict",
"[",
"5",
"]",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"restrict of length {} is not valid\"",
".",
"format",
"(",
"len",
"(",
"restrict",
")",
")",
")"
] |
Restrict data by indices.
Parameters:
----------
d -- the flds/sclr data
restrict -- a tuple of [xmin,xmax,...] etc
|
[
"Restrict",
"data",
"by",
"indices",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/flds.py#L110-L140
|
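A toy 2D example of restrict above, imported from the module path in the record (assuming lspreader is installed); metadata keys listed in notqs such as 't' are left alone while data arrays are sliced in place.

import numpy as np
from lspreader.flds import restrict  # assumes lspreader is installed

d = {'t': 0.0, 'Ex': np.arange(16).reshape(4, 4)}
restrict(d, (1, 3, 0, 2))      # four indices -> 2D slicing: rows 1:3, columns 0:2
print(d['Ex'].shape)           # (2, 2)
print(d['t'])                  # 0.0, untouched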
240,430
|
mdeous/fatbotslim
|
fatbotslim/log.py
|
create_logger
|
def create_logger(name, level='INFO'):
"""
Creates a new ready-to-use logger.
:param name: new logger's name
:type name: str
:param level: default logging level.
:type level: :class:`str` or :class:`int`
:return: new logger.
:rtype: :class:`logging.Logger`
"""
formatter = ColorFormatter(LOG_FORMAT, DATE_FORMAT)
if not isinstance(logging.getLevelName(level), int):
level = 'INFO'
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger
|
python
|
def create_logger(name, level='INFO'):
"""
Creates a new ready-to-use logger.
:param name: new logger's name
:type name: str
:param level: default logging level.
:type level: :class:`str` or :class:`int`
:return: new logger.
:rtype: :class:`logging.Logger`
"""
formatter = ColorFormatter(LOG_FORMAT, DATE_FORMAT)
if not isinstance(logging.getLevelName(level), int):
level = 'INFO'
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger
|
[
"def",
"create_logger",
"(",
"name",
",",
"level",
"=",
"'INFO'",
")",
":",
"formatter",
"=",
"ColorFormatter",
"(",
"LOG_FORMAT",
",",
"DATE_FORMAT",
")",
"if",
"not",
"isinstance",
"(",
"logging",
".",
"getLevelName",
"(",
"level",
")",
",",
"int",
")",
":",
"level",
"=",
"'INFO'",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
] |
Creates a new ready-to-use logger.
:param name: new logger's name
:type name: str
:param level: default logging level.
:type level: :class:`str` or :class:`int`
:return: new logger.
:rtype: :class:`logging.Logger`
|
[
"Creates",
"a",
"new",
"ready",
"-",
"to",
"-",
"use",
"logger",
"."
] |
341595d24454a79caee23750eac271f9d0626c88
|
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/log.py#L65-L84
|
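A hedged usage sketch for create_logger above, assuming fatbotslim is installed; the logger names and messages are illustrative. An unknown level string silently falls back to INFO because of the getLevelName check.

from fatbotslim.log import create_logger

log = create_logger(__name__, level='DEBUG')
log.info('bot connected')                  # formatted by the module's ColorFormatter
log.debug('raw line: %s', ':server PING')

fallback = create_logger('noisy', level='NOT_A_LEVEL')  # ends up at INFO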
240,431
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_contributors
|
def build_contributors(authors, contrib_type, competing_interests=None):
"""
Given a list of authors from the parser, instantiate contributors
objects and build them
"""
contributors = []
for author in authors:
contributor = None
author_contrib_type = contrib_type
surname = author.get("surname")
given_name = author.get("given-names")
collab = author.get("collab")
# Small hack for on-behalf-of type when building authors
# use on-behalf-of as the contrib_type
if author.get("type") and author.get("type") == "on-behalf-of":
collab = author.get("on-behalf-of")
author_contrib_type = "on-behalf-of"
if surname or collab:
contributor = ea.Contributor(author_contrib_type, surname, given_name, collab)
utils.set_attr_if_value(contributor, 'suffix', author.get('suffix'))
contributor.group_author_key = author.get("group-author-key")
contributor.orcid = author.get("orcid")
contributor.corresp = bool(author.get("corresp"))
if author.get("equal-contrib") == "yes":
contributor.equal_contrib = True
# Add contributor affiliations
for aff in author.get("affiliations", []):
affiliation = ea.Affiliation()
affiliation.text = utils.text_from_affiliation_elements(
aff.get("dept"),
aff.get("institution"),
aff.get("city"),
aff.get("country"))
# fall back if no other fields are set take the text content
if affiliation.text == '':
affiliation.text = aff.get("text")
contributor.set_affiliation(affiliation)
# competing interests / conflicts
if (competing_interests and author.get("references")
and "competing-interest" in author.get("references")):
for ref_id in author["references"]["competing-interest"]:
for competing_interest in competing_interests:
if competing_interest.get("text") and competing_interest.get("id") == ref_id:
clean_text = utils.remove_tag('p', competing_interest.get("text"))
contributor.set_conflict(clean_text)
# Finally add the contributor to the list
if contributor:
contributors.append(contributor)
return contributors
|
python
|
def build_contributors(authors, contrib_type, competing_interests=None):
"""
Given a list of authors from the parser, instantiate contributors
objects and build them
"""
contributors = []
for author in authors:
contributor = None
author_contrib_type = contrib_type
surname = author.get("surname")
given_name = author.get("given-names")
collab = author.get("collab")
# Small hack for on-behalf-of type when building authors
# use on-behalf-of as the contrib_type
if author.get("type") and author.get("type") == "on-behalf-of":
collab = author.get("on-behalf-of")
author_contrib_type = "on-behalf-of"
if surname or collab:
contributor = ea.Contributor(author_contrib_type, surname, given_name, collab)
utils.set_attr_if_value(contributor, 'suffix', author.get('suffix'))
contributor.group_author_key = author.get("group-author-key")
contributor.orcid = author.get("orcid")
contributor.corresp = bool(author.get("corresp"))
if author.get("equal-contrib") == "yes":
contributor.equal_contrib = True
# Add contributor affiliations
for aff in author.get("affiliations", []):
affiliation = ea.Affiliation()
affiliation.text = utils.text_from_affiliation_elements(
aff.get("dept"),
aff.get("institution"),
aff.get("city"),
aff.get("country"))
# fall back if no other fields are set take the text content
if affiliation.text == '':
affiliation.text = aff.get("text")
contributor.set_affiliation(affiliation)
# competing interests / conflicts
if (competing_interests and author.get("references")
and "competing-interest" in author.get("references")):
for ref_id in author["references"]["competing-interest"]:
for competing_interest in competing_interests:
if competing_interest.get("text") and competing_interest.get("id") == ref_id:
clean_text = utils.remove_tag('p', competing_interest.get("text"))
contributor.set_conflict(clean_text)
# Finally add the contributor to the list
if contributor:
contributors.append(contributor)
return contributors
|
[
"def",
"build_contributors",
"(",
"authors",
",",
"contrib_type",
",",
"competing_interests",
"=",
"None",
")",
":",
"contributors",
"=",
"[",
"]",
"for",
"author",
"in",
"authors",
":",
"contributor",
"=",
"None",
"author_contrib_type",
"=",
"contrib_type",
"surname",
"=",
"author",
".",
"get",
"(",
"\"surname\"",
")",
"given_name",
"=",
"author",
".",
"get",
"(",
"\"given-names\"",
")",
"collab",
"=",
"author",
".",
"get",
"(",
"\"collab\"",
")",
"# Small hack for on-behalf-of type when building authors",
"# use on-behalf-of as the contrib_type",
"if",
"author",
".",
"get",
"(",
"\"type\"",
")",
"and",
"author",
".",
"get",
"(",
"\"type\"",
")",
"==",
"\"on-behalf-of\"",
":",
"collab",
"=",
"author",
".",
"get",
"(",
"\"on-behalf-of\"",
")",
"author_contrib_type",
"=",
"\"on-behalf-of\"",
"if",
"surname",
"or",
"collab",
":",
"contributor",
"=",
"ea",
".",
"Contributor",
"(",
"author_contrib_type",
",",
"surname",
",",
"given_name",
",",
"collab",
")",
"utils",
".",
"set_attr_if_value",
"(",
"contributor",
",",
"'suffix'",
",",
"author",
".",
"get",
"(",
"'suffix'",
")",
")",
"contributor",
".",
"group_author_key",
"=",
"author",
".",
"get",
"(",
"\"group-author-key\"",
")",
"contributor",
".",
"orcid",
"=",
"author",
".",
"get",
"(",
"\"orcid\"",
")",
"contributor",
".",
"corresp",
"=",
"bool",
"(",
"author",
".",
"get",
"(",
"\"corresp\"",
")",
")",
"if",
"author",
".",
"get",
"(",
"\"equal-contrib\"",
")",
"==",
"\"yes\"",
":",
"contributor",
".",
"equal_contrib",
"=",
"True",
"# Add contributor affiliations",
"for",
"aff",
"in",
"author",
".",
"get",
"(",
"\"affiliations\"",
",",
"[",
"]",
")",
":",
"affiliation",
"=",
"ea",
".",
"Affiliation",
"(",
")",
"affiliation",
".",
"text",
"=",
"utils",
".",
"text_from_affiliation_elements",
"(",
"aff",
".",
"get",
"(",
"\"dept\"",
")",
",",
"aff",
".",
"get",
"(",
"\"institution\"",
")",
",",
"aff",
".",
"get",
"(",
"\"city\"",
")",
",",
"aff",
".",
"get",
"(",
"\"country\"",
")",
")",
"# fall back if no other fields are set take the text content",
"if",
"affiliation",
".",
"text",
"==",
"''",
":",
"affiliation",
".",
"text",
"=",
"aff",
".",
"get",
"(",
"\"text\"",
")",
"contributor",
".",
"set_affiliation",
"(",
"affiliation",
")",
"# competing interests / conflicts",
"if",
"(",
"competing_interests",
"and",
"author",
".",
"get",
"(",
"\"references\"",
")",
"and",
"\"competing-interest\"",
"in",
"author",
".",
"get",
"(",
"\"references\"",
")",
")",
":",
"for",
"ref_id",
"in",
"author",
"[",
"\"references\"",
"]",
"[",
"\"competing-interest\"",
"]",
":",
"for",
"competing_interest",
"in",
"competing_interests",
":",
"if",
"competing_interest",
".",
"get",
"(",
"\"text\"",
")",
"and",
"competing_interest",
".",
"get",
"(",
"\"id\"",
")",
"==",
"ref_id",
":",
"clean_text",
"=",
"utils",
".",
"remove_tag",
"(",
"'p'",
",",
"competing_interest",
".",
"get",
"(",
"\"text\"",
")",
")",
"contributor",
".",
"set_conflict",
"(",
"clean_text",
")",
"# Finally add the contributor to the list",
"if",
"contributor",
":",
"contributors",
".",
"append",
"(",
"contributor",
")",
"return",
"contributors"
] |
Given a list of authors from the parser, instantiate contributors
objects and build them
|
[
"Given",
"a",
"list",
"of",
"authors",
"from",
"the",
"parser",
"instantiate",
"contributors",
"objects",
"and",
"build",
"them"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L16-L75
|
240,432
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_funding
|
def build_funding(award_groups):
"""
Given a funding data, format it
"""
if not award_groups:
return []
funding_awards = []
for award_groups_item in award_groups:
for award_group_id, award_group in iteritems(award_groups_item):
award = ea.FundingAward()
award.award_group_id = award_group_id
if award_group.get('id-type') == "FundRef":
award.institution_id = award_group.get('id')
award.institution_name = award_group.get('institution')
# TODO !!! Check for multiple award_id, if exists
if award_group.get('award-id'):
award.add_award_id(award_group.get('award-id'))
funding_awards.append(award)
return funding_awards
|
python
|
def build_funding(award_groups):
"""
Given a funding data, format it
"""
if not award_groups:
return []
funding_awards = []
for award_groups_item in award_groups:
for award_group_id, award_group in iteritems(award_groups_item):
award = ea.FundingAward()
award.award_group_id = award_group_id
if award_group.get('id-type') == "FundRef":
award.institution_id = award_group.get('id')
award.institution_name = award_group.get('institution')
# TODO !!! Check for multiple award_id, if exists
if award_group.get('award-id'):
award.add_award_id(award_group.get('award-id'))
funding_awards.append(award)
return funding_awards
|
[
"def",
"build_funding",
"(",
"award_groups",
")",
":",
"if",
"not",
"award_groups",
":",
"return",
"[",
"]",
"funding_awards",
"=",
"[",
"]",
"for",
"award_groups_item",
"in",
"award_groups",
":",
"for",
"award_group_id",
",",
"award_group",
"in",
"iteritems",
"(",
"award_groups_item",
")",
":",
"award",
"=",
"ea",
".",
"FundingAward",
"(",
")",
"award",
".",
"award_group_id",
"=",
"award_group_id",
"if",
"award_group",
".",
"get",
"(",
"'id-type'",
")",
"==",
"\"FundRef\"",
":",
"award",
".",
"institution_id",
"=",
"award_group",
".",
"get",
"(",
"'id'",
")",
"award",
".",
"institution_name",
"=",
"award_group",
".",
"get",
"(",
"'institution'",
")",
"# TODO !!! Check for multiple award_id, if exists",
"if",
"award_group",
".",
"get",
"(",
"'award-id'",
")",
":",
"award",
".",
"add_award_id",
"(",
"award_group",
".",
"get",
"(",
"'award-id'",
")",
")",
"funding_awards",
".",
"append",
"(",
"award",
")",
"return",
"funding_awards"
] |
Given a funding data, format it
|
[
"Given",
"a",
"funding",
"data",
"format",
"it"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L78-L102
|
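A sketch of the input shape the loop above expects: a list of dicts mapping an award-group id to its details. The values below (FundRef id, institution, award id) are invented, and the attribute access at the end relies only on the attributes the loop itself assigns to each elifearticle FundingAward.

from elifearticle.parse import build_funding  # assumes elifearticle is installed

award_groups = [{
    'fund-1': {
        'id-type': 'FundRef',
        'id': 'http://dx.doi.org/10.13039/100000001',   # made-up FundRef URI
        'institution': 'Example Science Foundation',
        'award-id': 'ABC-123',
    }
}]
awards = build_funding(award_groups)
print(awards[0].award_group_id)      # 'fund-1'
print(awards[0].institution_name)    # 'Example Science Foundation'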
240,433
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_datasets
|
def build_datasets(datasets_json):
"""
Given datasets in JSON format, build and return a list of dataset objects
"""
if not datasets_json:
return []
datasets = []
dataset_type_map = OrderedDict([
('generated', 'datasets'),
('used', 'prev_published_datasets')
])
dataset_type_map_found = []
# First look for the types of datasets present
for dataset_key, dataset_type in iteritems(dataset_type_map):
if datasets_json.get(dataset_key):
dataset_type_map_found.append(dataset_key)
# Continue with the found dataset types
for dataset_key in dataset_type_map_found:
dataset_type = dataset_type_map.get(dataset_key)
for dataset_values in datasets_json.get(dataset_key):
dataset = ea.Dataset()
utils.set_attr_if_value(dataset, 'dataset_type', dataset_type)
utils.set_attr_if_value(dataset, 'year', dataset_values.get('date'))
utils.set_attr_if_value(dataset, 'title', dataset_values.get('title'))
utils.set_attr_if_value(dataset, 'comment', dataset_values.get('details'))
utils.set_attr_if_value(dataset, 'doi', dataset_values.get('doi'))
utils.set_attr_if_value(dataset, 'uri', dataset_values.get('uri'))
utils.set_attr_if_value(dataset, 'accession_id', dataset_values.get('dataId'))
utils.set_attr_if_value(dataset, 'assigning_authority',
dataset_values.get('assigningAuthority'))
# authors
if dataset_values.get('authors'):
# parse JSON format authors into author objects
for author_json in dataset_values.get('authors'):
if utils.author_name_from_json(author_json):
dataset.add_author(utils.author_name_from_json(author_json))
# Try to populate the doi attribute if the uri is a doi
if not dataset.doi and dataset.uri:
if dataset.uri != eautils.doi_uri_to_doi(dataset.uri):
dataset.doi = eautils.doi_uri_to_doi(dataset.uri)
datasets.append(dataset)
return datasets
|
python
|
def build_datasets(datasets_json):
"""
Given datasets in JSON format, build and return a list of dataset objects
"""
if not datasets_json:
return []
datasets = []
dataset_type_map = OrderedDict([
('generated', 'datasets'),
('used', 'prev_published_datasets')
])
dataset_type_map_found = []
# First look for the types of datasets present
for dataset_key, dataset_type in iteritems(dataset_type_map):
if datasets_json.get(dataset_key):
dataset_type_map_found.append(dataset_key)
# Continue with the found dataset types
for dataset_key in dataset_type_map_found:
dataset_type = dataset_type_map.get(dataset_key)
for dataset_values in datasets_json.get(dataset_key):
dataset = ea.Dataset()
utils.set_attr_if_value(dataset, 'dataset_type', dataset_type)
utils.set_attr_if_value(dataset, 'year', dataset_values.get('date'))
utils.set_attr_if_value(dataset, 'title', dataset_values.get('title'))
utils.set_attr_if_value(dataset, 'comment', dataset_values.get('details'))
utils.set_attr_if_value(dataset, 'doi', dataset_values.get('doi'))
utils.set_attr_if_value(dataset, 'uri', dataset_values.get('uri'))
utils.set_attr_if_value(dataset, 'accession_id', dataset_values.get('dataId'))
utils.set_attr_if_value(dataset, 'assigning_authority',
dataset_values.get('assigningAuthority'))
# authors
if dataset_values.get('authors'):
# parse JSON format authors into author objects
for author_json in dataset_values.get('authors'):
if utils.author_name_from_json(author_json):
dataset.add_author(utils.author_name_from_json(author_json))
# Try to populate the doi attribute if the uri is a doi
if not dataset.doi and dataset.uri:
if dataset.uri != eautils.doi_uri_to_doi(dataset.uri):
dataset.doi = eautils.doi_uri_to_doi(dataset.uri)
datasets.append(dataset)
return datasets
|
[
"def",
"build_datasets",
"(",
"datasets_json",
")",
":",
"if",
"not",
"datasets_json",
":",
"return",
"[",
"]",
"datasets",
"=",
"[",
"]",
"dataset_type_map",
"=",
"OrderedDict",
"(",
"[",
"(",
"'generated'",
",",
"'datasets'",
")",
",",
"(",
"'used'",
",",
"'prev_published_datasets'",
")",
"]",
")",
"dataset_type_map_found",
"=",
"[",
"]",
"# First look for the types of datasets present",
"for",
"dataset_key",
",",
"dataset_type",
"in",
"iteritems",
"(",
"dataset_type_map",
")",
":",
"if",
"datasets_json",
".",
"get",
"(",
"dataset_key",
")",
":",
"dataset_type_map_found",
".",
"append",
"(",
"dataset_key",
")",
"# Continue with the found dataset types",
"for",
"dataset_key",
"in",
"dataset_type_map_found",
":",
"dataset_type",
"=",
"dataset_type_map",
".",
"get",
"(",
"dataset_key",
")",
"for",
"dataset_values",
"in",
"datasets_json",
".",
"get",
"(",
"dataset_key",
")",
":",
"dataset",
"=",
"ea",
".",
"Dataset",
"(",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'dataset_type'",
",",
"dataset_type",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'year'",
",",
"dataset_values",
".",
"get",
"(",
"'date'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'title'",
",",
"dataset_values",
".",
"get",
"(",
"'title'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'comment'",
",",
"dataset_values",
".",
"get",
"(",
"'details'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'doi'",
",",
"dataset_values",
".",
"get",
"(",
"'doi'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'uri'",
",",
"dataset_values",
".",
"get",
"(",
"'uri'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'accession_id'",
",",
"dataset_values",
".",
"get",
"(",
"'dataId'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"dataset",
",",
"'assigning_authority'",
",",
"dataset_values",
".",
"get",
"(",
"'assigningAuthority'",
")",
")",
"# authors",
"if",
"dataset_values",
".",
"get",
"(",
"'authors'",
")",
":",
"# parse JSON format authors into author objects",
"for",
"author_json",
"in",
"dataset_values",
".",
"get",
"(",
"'authors'",
")",
":",
"if",
"utils",
".",
"author_name_from_json",
"(",
"author_json",
")",
":",
"dataset",
".",
"add_author",
"(",
"utils",
".",
"author_name_from_json",
"(",
"author_json",
")",
")",
"# Try to populate the doi attribute if the uri is a doi",
"if",
"not",
"dataset",
".",
"doi",
"and",
"dataset",
".",
"uri",
":",
"if",
"dataset",
".",
"uri",
"!=",
"eautils",
".",
"doi_uri_to_doi",
"(",
"dataset",
".",
"uri",
")",
":",
"dataset",
".",
"doi",
"=",
"eautils",
".",
"doi_uri_to_doi",
"(",
"dataset",
".",
"uri",
")",
"datasets",
".",
"append",
"(",
"dataset",
")",
"return",
"datasets"
] |
Given datasets in JSON format, build and return a list of dataset objects
|
[
"Given",
"datasets",
"in",
"JSON",
"format",
"build",
"and",
"return",
"a",
"list",
"of",
"dataset",
"objects"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L105-L147
|
240,434
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_data_availability
|
def build_data_availability(datasets_json):
"""
Given datasets in JSON format, get the data availability from it if present
"""
data_availability = None
if 'availability' in datasets_json and datasets_json.get('availability'):
# only expect one paragraph of text
data_availability = datasets_json.get('availability')[0].get('text')
return data_availability
|
python
|
def build_data_availability(datasets_json):
"""
Given datasets in JSON format, get the data availability from it if present
"""
data_availability = None
if 'availability' in datasets_json and datasets_json.get('availability'):
# only expect one paragraph of text
data_availability = datasets_json.get('availability')[0].get('text')
return data_availability
|
[
"def",
"build_data_availability",
"(",
"datasets_json",
")",
":",
"data_availability",
"=",
"None",
"if",
"'availability'",
"in",
"datasets_json",
"and",
"datasets_json",
".",
"get",
"(",
"'availability'",
")",
":",
"# only expect one paragraph of text",
"data_availability",
"=",
"datasets_json",
".",
"get",
"(",
"'availability'",
")",
"[",
"0",
"]",
".",
"get",
"(",
"'text'",
")",
"return",
"data_availability"
] |
Given datasets in JSON format, get the data availability from it if present
|
[
"Given",
"datasets",
"in",
"JSON",
"format",
"get",
"the",
"data",
"availability",
"from",
"it",
"if",
"present"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L150-L158
|
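A short check of build_data_availability above, imported from the module path in the record (assuming elifearticle is installed); the JSON fragment is invented but shaped like the 'availability' list the function reads.

from elifearticle.parse import build_data_availability  # assumes elifearticle is installed

datasets_json = {'availability': [{'text': 'All data generated during this study are included.'}]}
print(build_data_availability(datasets_json))
# 'All data generated during this study are included.'
print(build_data_availability({}))   # None: no availability statement present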
240,435
|
elifesciences/elife-article
|
elifearticle/parse.py
|
component_title
|
def component_title(component):
"""
Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc.
"""
title = u''
label_text = u''
title_text = u''
if component.get('label'):
label_text = component.get('label')
if component.get('title'):
title_text = component.get('title')
title = unicode_value(label_text)
if label_text != '' and title_text != '':
title += ' '
title += unicode_value(title_text)
if component.get('type') == 'abstract' and title == '':
title = 'Abstract'
return title
|
python
|
def component_title(component):
"""
Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc.
"""
title = u''
label_text = u''
title_text = u''
if component.get('label'):
label_text = component.get('label')
if component.get('title'):
title_text = component.get('title')
title = unicode_value(label_text)
if label_text != '' and title_text != '':
title += ' '
title += unicode_value(title_text)
if component.get('type') == 'abstract' and title == '':
title = 'Abstract'
return title
|
[
"def",
"component_title",
"(",
"component",
")",
":",
"title",
"=",
"u''",
"label_text",
"=",
"u''",
"title_text",
"=",
"u''",
"if",
"component",
".",
"get",
"(",
"'label'",
")",
":",
"label_text",
"=",
"component",
".",
"get",
"(",
"'label'",
")",
"if",
"component",
".",
"get",
"(",
"'title'",
")",
":",
"title_text",
"=",
"component",
".",
"get",
"(",
"'title'",
")",
"title",
"=",
"unicode_value",
"(",
"label_text",
")",
"if",
"label_text",
"!=",
"''",
"and",
"title_text",
"!=",
"''",
":",
"title",
"+=",
"' '",
"title",
"+=",
"unicode_value",
"(",
"title_text",
")",
"if",
"component",
".",
"get",
"(",
"'type'",
")",
"==",
"'abstract'",
"and",
"title",
"==",
"''",
":",
"title",
"=",
"'Abstract'",
"return",
"title"
] |
Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc.
|
[
"Label",
"title",
"and",
"caption",
"Title",
"is",
"the",
"label",
"text",
"plus",
"the",
"title",
"text",
"Title",
"may",
"contain",
"italic",
"tag",
"etc",
"."
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L255-L280
|
240,436
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_components
|
def build_components(components):
"""
Given parsed components build a list of component objects
"""
component_list = []
for comp in components:
component = ea.Component()
# id
component.id = comp.get('id')
# type
component.type = comp.get('type')
# asset, if available
component.asset = comp.get('asset')
# DOI
component.doi = comp.get('doi')
if component_title(comp) != '':
component.title = component_title(comp)
# Subtitle
if comp.get('type') in ['supplementary-material', 'fig']:
if comp.get('full_caption'):
subtitle = comp.get('full_caption')
subtitle = clean_abstract(subtitle)
component.subtitle = subtitle
# Mime type
if comp.get('type') in ['abstract', 'table-wrap', 'sub-article',
'chem-struct-wrap', 'boxed-text']:
component.mime_type = 'text/plain'
if comp.get('type') in ['fig']:
component.mime_type = 'image/tiff'
elif comp.get('type') in ['media', 'supplementary-material']:
if comp.get('mimetype') and comp.get('mime-subtype'):
component.mime_type = (comp.get('mimetype') + '/'
+ comp.get('mime-subtype'))
# Permissions
component.permissions = comp.get('permissions')
# Append it to our list of components
component_list.append(component)
return component_list
|
python
|
def build_components(components):
"""
Given parsed components build a list of component objects
"""
component_list = []
for comp in components:
component = ea.Component()
# id
component.id = comp.get('id')
# type
component.type = comp.get('type')
# asset, if available
component.asset = comp.get('asset')
# DOI
component.doi = comp.get('doi')
if component_title(comp) != '':
component.title = component_title(comp)
# Subtitle
if comp.get('type') in ['supplementary-material', 'fig']:
if comp.get('full_caption'):
subtitle = comp.get('full_caption')
subtitle = clean_abstract(subtitle)
component.subtitle = subtitle
# Mime type
if comp.get('type') in ['abstract', 'table-wrap', 'sub-article',
'chem-struct-wrap', 'boxed-text']:
component.mime_type = 'text/plain'
if comp.get('type') in ['fig']:
component.mime_type = 'image/tiff'
elif comp.get('type') in ['media', 'supplementary-material']:
if comp.get('mimetype') and comp.get('mime-subtype'):
component.mime_type = (comp.get('mimetype') + '/'
+ comp.get('mime-subtype'))
# Permissions
component.permissions = comp.get('permissions')
# Append it to our list of components
component_list.append(component)
return component_list
|
[
"def",
"build_components",
"(",
"components",
")",
":",
"component_list",
"=",
"[",
"]",
"for",
"comp",
"in",
"components",
":",
"component",
"=",
"ea",
".",
"Component",
"(",
")",
"# id",
"component",
".",
"id",
"=",
"comp",
".",
"get",
"(",
"'id'",
")",
"# type",
"component",
".",
"type",
"=",
"comp",
".",
"get",
"(",
"'type'",
")",
"# asset, if available",
"component",
".",
"asset",
"=",
"comp",
".",
"get",
"(",
"'asset'",
")",
"# DOI",
"component",
".",
"doi",
"=",
"comp",
".",
"get",
"(",
"'doi'",
")",
"if",
"component_title",
"(",
"comp",
")",
"!=",
"''",
":",
"component",
".",
"title",
"=",
"component_title",
"(",
"comp",
")",
"# Subtitle",
"if",
"comp",
".",
"get",
"(",
"'type'",
")",
"in",
"[",
"'supplementary-material'",
",",
"'fig'",
"]",
":",
"if",
"comp",
".",
"get",
"(",
"'full_caption'",
")",
":",
"subtitle",
"=",
"comp",
".",
"get",
"(",
"'full_caption'",
")",
"subtitle",
"=",
"clean_abstract",
"(",
"subtitle",
")",
"component",
".",
"subtitle",
"=",
"subtitle",
"# Mime type",
"if",
"comp",
".",
"get",
"(",
"'type'",
")",
"in",
"[",
"'abstract'",
",",
"'table-wrap'",
",",
"'sub-article'",
",",
"'chem-struct-wrap'",
",",
"'boxed-text'",
"]",
":",
"component",
".",
"mime_type",
"=",
"'text/plain'",
"if",
"comp",
".",
"get",
"(",
"'type'",
")",
"in",
"[",
"'fig'",
"]",
":",
"component",
".",
"mime_type",
"=",
"'image/tiff'",
"elif",
"comp",
".",
"get",
"(",
"'type'",
")",
"in",
"[",
"'media'",
",",
"'supplementary-material'",
"]",
":",
"if",
"comp",
".",
"get",
"(",
"'mimetype'",
")",
"and",
"comp",
".",
"get",
"(",
"'mime-subtype'",
")",
":",
"component",
".",
"mime_type",
"=",
"(",
"comp",
".",
"get",
"(",
"'mimetype'",
")",
"+",
"'/'",
"+",
"comp",
".",
"get",
"(",
"'mime-subtype'",
")",
")",
"# Permissions",
"component",
".",
"permissions",
"=",
"comp",
".",
"get",
"(",
"'permissions'",
")",
"# Append it to our list of components",
"component_list",
".",
"append",
"(",
"component",
")",
"return",
"component_list"
] |
Given parsed components build a list of component objects
|
[
"Given",
"parsed",
"components",
"build",
"a",
"list",
"of",
"component",
"objects"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L283-L332
|
240,437
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_related_articles
|
def build_related_articles(related_articles):
"""
Given parsed data build a list of related article objects
"""
article_list = []
for related_article in related_articles:
article = ea.RelatedArticle()
if related_article.get('xlink_href'):
article.xlink_href = related_article.get('xlink_href')
if related_article.get('related_article_type'):
article.related_article_type = related_article.get('related_article_type')
if related_article.get('ext_link_type'):
article.ext_link_type = related_article.get('ext_link_type')
# Append it to our list
article_list.append(article)
return article_list
|
python
|
def build_related_articles(related_articles):
"""
Given parsed data build a list of related article objects
"""
article_list = []
for related_article in related_articles:
article = ea.RelatedArticle()
if related_article.get('xlink_href'):
article.xlink_href = related_article.get('xlink_href')
if related_article.get('related_article_type'):
article.related_article_type = related_article.get('related_article_type')
if related_article.get('ext_link_type'):
article.ext_link_type = related_article.get('ext_link_type')
# Append it to our list
article_list.append(article)
return article_list
|
[
"def",
"build_related_articles",
"(",
"related_articles",
")",
":",
"article_list",
"=",
"[",
"]",
"for",
"related_article",
"in",
"related_articles",
":",
"article",
"=",
"ea",
".",
"RelatedArticle",
"(",
")",
"if",
"related_article",
".",
"get",
"(",
"'xlink_href'",
")",
":",
"article",
".",
"xlink_href",
"=",
"related_article",
".",
"get",
"(",
"'xlink_href'",
")",
"if",
"related_article",
".",
"get",
"(",
"'related_article_type'",
")",
":",
"article",
".",
"related_article_type",
"=",
"related_article",
".",
"get",
"(",
"'related_article_type'",
")",
"if",
"related_article",
".",
"get",
"(",
"'ext_link_type'",
")",
":",
"article",
".",
"ext_link_type",
"=",
"related_article",
".",
"get",
"(",
"'ext_link_type'",
")",
"# Append it to our list",
"article_list",
".",
"append",
"(",
"article",
")",
"return",
"article_list"
] |
Given parsed data build a list of related article objects
|
[
"Given",
"parsed",
"data",
"build",
"a",
"list",
"of",
"related",
"article",
"objects"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L335-L353
|
240,438
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_pub_dates
|
def build_pub_dates(article, pub_dates):
"convert pub_dates into ArticleDate objects and add them to article"
for pub_date in pub_dates:
# always want a date type, take it from pub-type if must
if pub_date.get('date-type'):
date_instance = ea.ArticleDate(pub_date.get('date-type'),
pub_date.get('date'))
elif pub_date.get('pub-type'):
date_instance = ea.ArticleDate(pub_date.get('pub-type'),
pub_date.get('date'))
# Set more values
utils.set_attr_if_value(date_instance, 'pub_type', pub_date.get('pub-type'))
utils.set_attr_if_value(date_instance, 'publication_format',
pub_date.get('publication-format'))
utils.set_attr_if_value(date_instance, 'day', pub_date.get('day'))
utils.set_attr_if_value(date_instance, 'month', pub_date.get('month'))
utils.set_attr_if_value(date_instance, 'year', pub_date.get('year'))
article.add_date(date_instance)
|
python
|
def build_pub_dates(article, pub_dates):
"convert pub_dates into ArticleDate objects and add them to article"
for pub_date in pub_dates:
# always want a date type, take it from pub-type if must
if pub_date.get('date-type'):
date_instance = ea.ArticleDate(pub_date.get('date-type'),
pub_date.get('date'))
elif pub_date.get('pub-type'):
date_instance = ea.ArticleDate(pub_date.get('pub-type'),
pub_date.get('date'))
# Set more values
utils.set_attr_if_value(date_instance, 'pub_type', pub_date.get('pub-type'))
utils.set_attr_if_value(date_instance, 'publication_format',
pub_date.get('publication-format'))
utils.set_attr_if_value(date_instance, 'day', pub_date.get('day'))
utils.set_attr_if_value(date_instance, 'month', pub_date.get('month'))
utils.set_attr_if_value(date_instance, 'year', pub_date.get('year'))
article.add_date(date_instance)
|
[
"def",
"build_pub_dates",
"(",
"article",
",",
"pub_dates",
")",
":",
"for",
"pub_date",
"in",
"pub_dates",
":",
"# always want a date type, take it from pub-type if must",
"if",
"pub_date",
".",
"get",
"(",
"'date-type'",
")",
":",
"date_instance",
"=",
"ea",
".",
"ArticleDate",
"(",
"pub_date",
".",
"get",
"(",
"'date-type'",
")",
",",
"pub_date",
".",
"get",
"(",
"'date'",
")",
")",
"elif",
"pub_date",
".",
"get",
"(",
"'pub-type'",
")",
":",
"date_instance",
"=",
"ea",
".",
"ArticleDate",
"(",
"pub_date",
".",
"get",
"(",
"'pub-type'",
")",
",",
"pub_date",
".",
"get",
"(",
"'date'",
")",
")",
"# Set more values",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'pub_type'",
",",
"pub_date",
".",
"get",
"(",
"'pub-type'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'publication_format'",
",",
"pub_date",
".",
"get",
"(",
"'publication-format'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'day'",
",",
"pub_date",
".",
"get",
"(",
"'day'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'month'",
",",
"pub_date",
".",
"get",
"(",
"'month'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'year'",
",",
"pub_date",
".",
"get",
"(",
"'year'",
")",
")",
"article",
".",
"add_date",
"(",
"date_instance",
")"
] |
convert pub_dates into ArticleDate objects and add them to article
|
[
"convert",
"pub_dates",
"into",
"ArticleDate",
"objects",
"and",
"add",
"them",
"to",
"article"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L356-L373
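A hedged sketch of the pub_dates structure that build_pub_dates iterates over, inferred from the keys it reads; ea.Article() is assumed to be the article class from the same elifearticle package, so treat the exact constructor as an assumption:

article = ea.Article()  # assumed: the elifearticle Article class can be built with no arguments
pub_dates = [
    {"date-type": "accepted", "date": "2016-01-12",
     "day": "12", "month": "01", "year": "2016"},
    {"pub-type": "epub", "publication-format": "electronic",
     "date": "2016-02-01", "day": "01", "month": "02", "year": "2016"},
]
build_pub_dates(article, pub_dates)
# the article object now carries two ArticleDate entries (added via add_date)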
|
240,439
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_self_uri_list
|
def build_self_uri_list(self_uri_list):
"parse the self-uri tags, build Uri objects"
uri_list = []
for self_uri in self_uri_list:
uri = ea.Uri()
utils.set_attr_if_value(uri, 'xlink_href', self_uri.get('xlink_href'))
utils.set_attr_if_value(uri, 'content_type', self_uri.get('content-type'))
uri_list.append(uri)
return uri_list
|
python
|
def build_self_uri_list(self_uri_list):
"parse the self-uri tags, build Uri objects"
uri_list = []
for self_uri in self_uri_list:
uri = ea.Uri()
utils.set_attr_if_value(uri, 'xlink_href', self_uri.get('xlink_href'))
utils.set_attr_if_value(uri, 'content_type', self_uri.get('content-type'))
uri_list.append(uri)
return uri_list
|
[
"def",
"build_self_uri_list",
"(",
"self_uri_list",
")",
":",
"uri_list",
"=",
"[",
"]",
"for",
"self_uri",
"in",
"self_uri_list",
":",
"uri",
"=",
"ea",
".",
"Uri",
"(",
")",
"utils",
".",
"set_attr_if_value",
"(",
"uri",
",",
"'xlink_href'",
",",
"self_uri",
".",
"get",
"(",
"'xlink_href'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"uri",
",",
"'content_type'",
",",
"self_uri",
".",
"get",
"(",
"'content-type'",
")",
")",
"uri_list",
".",
"append",
"(",
"uri",
")",
"return",
"uri_list"
] |
parse the self-uri tags, build Uri objects
|
[
"parse",
"the",
"self",
"-",
"uri",
"tags",
"build",
"Uri",
"objects"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L376-L384
|
240,440
|
elifesciences/elife-article
|
elifearticle/parse.py
|
clean_abstract
|
def clean_abstract(abstract, remove_tags=['xref', 'ext-link', 'inline-formula', 'mml:*']):
"""
Remove unwanted tags from abstract string,
parsing it as HTML, then only keep the body paragraph contents
"""
if remove_tags:
for tag_name in remove_tags:
abstract = utils.remove_tag(tag_name, abstract)
return abstract
|
python
|
def clean_abstract(abstract, remove_tags=['xref', 'ext-link', 'inline-formula', 'mml:*']):
"""
Remove unwanted tags from abstract string,
parsing it as HTML, then only keep the body paragraph contents
"""
if remove_tags:
for tag_name in remove_tags:
abstract = utils.remove_tag(tag_name, abstract)
return abstract
|
[
"def",
"clean_abstract",
"(",
"abstract",
",",
"remove_tags",
"=",
"[",
"'xref'",
",",
"'ext-link'",
",",
"'inline-formula'",
",",
"'mml:*'",
"]",
")",
":",
"if",
"remove_tags",
":",
"for",
"tag_name",
"in",
"remove_tags",
":",
"abstract",
"=",
"utils",
".",
"remove_tag",
"(",
"tag_name",
",",
"abstract",
")",
"return",
"abstract"
] |
Remove unwanted tags from abstract string,
parsing it as HTML, then only keep the body paragraph contents
|
[
"Remove",
"unwanted",
"tags",
"from",
"abstract",
"string",
"parsing",
"it",
"as",
"HTML",
"then",
"only",
"keep",
"the",
"body",
"paragraph",
"contents"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L387-L395
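clean_abstract delegates the actual stripping to utils.remove_tag, which is not shown here. As a standalone illustration of the same idea (a sketch under that assumption, not the package's implementation), a regex-based remover that drops the tags but keeps their inner text could look like this; note the 'mml:*' wildcard in the default list would need extra handling:

import re

def remove_tag_sketch(tag_name, text):
    # drop opening, closing and self-closing forms of <tag_name>, keep the contents
    pattern = r"<{0}\b[^>]*/?>|</{0}>".format(re.escape(tag_name))
    return re.sub(pattern, "", text)

abstract = 'See <xref ref-type="bibr">Smith et al., 2015</xref> for details.'
print(remove_tag_sketch("xref", abstract))
# -> 'See Smith et al., 2015 for details.'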
|
240,441
|
elifesciences/elife-article
|
elifearticle/parse.py
|
build_articles_from_article_xmls
|
def build_articles_from_article_xmls(article_xmls, detail="full",
build_parts=None, remove_tags=None):
"""
Given a list of article XML filenames, convert to article objects
"""
poa_articles = []
for article_xml in article_xmls:
print("working on ", article_xml)
article, error_count = build_article_from_xml(article_xml, detail,
build_parts, remove_tags)
if error_count == 0:
poa_articles.append(article)
return poa_articles
|
python
|
def build_articles_from_article_xmls(article_xmls, detail="full",
build_parts=None, remove_tags=None):
"""
Given a list of article XML filenames, convert to article objects
"""
poa_articles = []
for article_xml in article_xmls:
print("working on ", article_xml)
article, error_count = build_article_from_xml(article_xml, detail,
build_parts, remove_tags)
if error_count == 0:
poa_articles.append(article)
return poa_articles
|
[
"def",
"build_articles_from_article_xmls",
"(",
"article_xmls",
",",
"detail",
"=",
"\"full\"",
",",
"build_parts",
"=",
"None",
",",
"remove_tags",
"=",
"None",
")",
":",
"poa_articles",
"=",
"[",
"]",
"for",
"article_xml",
"in",
"article_xmls",
":",
"print",
"(",
"\"working on \"",
",",
"article_xml",
")",
"article",
",",
"error_count",
"=",
"build_article_from_xml",
"(",
"article_xml",
",",
"detail",
",",
"build_parts",
",",
"remove_tags",
")",
"if",
"error_count",
"==",
"0",
":",
"poa_articles",
".",
"append",
"(",
"article",
")",
"return",
"poa_articles"
] |
Given a list of article XML filenames, convert to article objects
|
[
"Given",
"a",
"list",
"of",
"article",
"XML",
"filenames",
"convert",
"to",
"article",
"objects"
] |
99710c213cd81fe6fd1e5c150d6e20efe2d1e33b
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L571-L586
|
240,442
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
accounts
|
def accounts():
"""Load the accounts YAML file and return a dict """
import yaml
for path in account_files:
try:
c_dir = os.path.dirname(path)
if not os.path.exists(c_dir):
os.makedirs(c_dir)
with open(path, 'rb') as f:
return yaml.load(f)['accounts']
except (OSError, IOError) as e:
pass
return {}
|
python
|
def accounts():
"""Load the accounts YAML file and return a dict """
import yaml
for path in account_files:
try:
c_dir = os.path.dirname(path)
if not os.path.exists(c_dir):
os.makedirs(c_dir)
with open(path, 'rb') as f:
return yaml.load(f)['accounts']
except (OSError, IOError) as e:
pass
return {}
|
[
"def",
"accounts",
"(",
")",
":",
"import",
"yaml",
"for",
"path",
"in",
"account_files",
":",
"try",
":",
"c_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"c_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"c_dir",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"yaml",
".",
"load",
"(",
"f",
")",
"[",
"'accounts'",
"]",
"except",
"(",
"OSError",
",",
"IOError",
")",
"as",
"e",
":",
"pass",
"return",
"{",
"}"
] |
Load the accounts YAML file and return a dict
|
[
"Load",
"the",
"accounts",
"YAML",
"file",
"and",
"return",
"a",
"dict"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L57-L75
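accounts() returns the 'accounts' mapping from the first readable file listed in the module-level account_files. A hypothetical file content that would satisfy it (all values are placeholders) and the resulting dict:

import yaml

sample = """
accounts:
  s3:
    access: placeholder-access-key
    secret: placeholder-secret
"""
print(yaml.safe_load(sample)["accounts"])
# -> {'s3': {'access': 'placeholder-access-key', 'secret': 'placeholder-secret'}}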
|
240,443
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
get_logger
|
def get_logger(
name, file_name=None, stream=None, template=None, propagate=False):
"""Get a logger by name
if file_name is specified, and the dirname() of the file_name exists, it will
    write to that file. If the dirname does not exist, it will silently ignore it. """

logger = logging.getLogger(name)
if propagate is not None:
logger.propagate = propagate
for handler in logger.handlers:
logger.removeHandler(handler)
if not template:
template = "%(name)s %(process)s %(levelname)s %(message)s"
formatter = logging.Formatter(template)
if not file_name and not stream:
stream = sys.stdout
handlers = []
if stream is not None:
handlers.append(logging.StreamHandler(stream=stream))
if file_name is not None:
if os.path.isdir(os.path.dirname(file_name)):
handlers.append(logging.FileHandler(file_name))
else:
print("ERROR: Can't open log file {}".format(file_name))
for ch in handlers:
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
return logger
|
python
|
def get_logger(
name, file_name=None, stream=None, template=None, propagate=False):
"""Get a logger by name
if file_name is specified, and the dirname() of the file_name exists, it will
    write to that file. If the dirname does not exist, it will silently ignore it. """
logger = logging.getLogger(name)
if propagate is not None:
logger.propagate = propagate
for handler in logger.handlers:
logger.removeHandler(handler)
if not template:
template = "%(name)s %(process)s %(levelname)s %(message)s"
formatter = logging.Formatter(template)
if not file_name and not stream:
stream = sys.stdout
handlers = []
if stream is not None:
handlers.append(logging.StreamHandler(stream=stream))
if file_name is not None:
if os.path.isdir(os.path.dirname(file_name)):
handlers.append(logging.FileHandler(file_name))
else:
print("ERROR: Can't open log file {}".format(file_name))
for ch in handlers:
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
return logger
|
[
"def",
"get_logger",
"(",
"name",
",",
"file_name",
"=",
"None",
",",
"stream",
"=",
"None",
",",
"template",
"=",
"None",
",",
"propagate",
"=",
"False",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"if",
"propagate",
"is",
"not",
"None",
":",
"logger",
".",
"propagate",
"=",
"propagate",
"for",
"handler",
"in",
"logger",
".",
"handlers",
":",
"logger",
".",
"removeHandler",
"(",
"handler",
")",
"if",
"not",
"template",
":",
"template",
"=",
"\"%(name)s %(process)s %(levelname)s %(message)s\"",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"template",
")",
"if",
"not",
"file_name",
"and",
"not",
"stream",
":",
"stream",
"=",
"sys",
".",
"stdout",
"handlers",
"=",
"[",
"]",
"if",
"stream",
"is",
"not",
"None",
":",
"handlers",
".",
"append",
"(",
"logging",
".",
"StreamHandler",
"(",
"stream",
"=",
"stream",
")",
")",
"if",
"file_name",
"is",
"not",
"None",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file_name",
")",
")",
":",
"handlers",
".",
"append",
"(",
"logging",
".",
"FileHandler",
"(",
"file_name",
")",
")",
"else",
":",
"print",
"(",
"\"ERROR: Can't open log file {}\"",
".",
"format",
"(",
"file_name",
")",
")",
"for",
"ch",
"in",
"handlers",
":",
"ch",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"ch",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"return",
"logger"
] |
Get a logger by name
if file_name is specified, and the dirname() of the file_name exists, it will
write to that file. If the dirname does not exist, it will silently ignore it.
|
[
"Get",
"a",
"logger",
"by",
"name"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L758-L800
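A usage sketch for get_logger; only the standard logging machinery is involved, and the file path below is illustrative:

import sys

log = get_logger("ckcache.demo", stream=sys.stderr,
                 template="%(levelname)s %(message)s")
log.info("cache initialised")
# With file_name set, output also goes to that file, but only when its
# directory already exists; otherwise the function prints the ERROR line above.
file_log = get_logger("ckcache.files", file_name="/tmp/ckcache.log", stream=sys.stderr)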
|
240,444
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
md5_for_file
|
def md5_for_file(f, block_size=2 ** 20):
    """Generate an MD5 hash for a possibly large file by breaking it into chunks"""
import hashlib
md5 = hashlib.md5()
try:
# Guess that f is a FLO.
f.seek(0)
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
except AttributeError as e:
# Nope, not a FLO. Maybe string?
file_name = f
with open(file_name, 'rb') as f:
return md5_for_file(f, block_size)
|
python
|
def md5_for_file(f, block_size=2 ** 20):
    """Generate an MD5 hash for a possibly large file by breaking it into chunks"""
import hashlib
md5 = hashlib.md5()
try:
# Guess that f is a FLO.
f.seek(0)
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
except AttributeError as e:
# Nope, not a FLO. Maybe string?
file_name = f
with open(file_name, 'rb') as f:
return md5_for_file(f, block_size)
|
[
"def",
"md5_for_file",
"(",
"f",
",",
"block_size",
"=",
"2",
"**",
"20",
")",
":",
"import",
"hashlib",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"try",
":",
"# Guess that f is a FLO.",
"f",
".",
"seek",
"(",
"0",
")",
"while",
"True",
":",
"data",
"=",
"f",
".",
"read",
"(",
"block_size",
")",
"if",
"not",
"data",
":",
"break",
"md5",
".",
"update",
"(",
"data",
")",
"return",
"md5",
".",
"hexdigest",
"(",
")",
"except",
"AttributeError",
"as",
"e",
":",
"# Nope, not a FLO. Maybe string?",
"file_name",
"=",
"f",
"with",
"open",
"(",
"file_name",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"md5_for_file",
"(",
"f",
",",
"block_size",
")"
] |
Generate an MD5 hash for a possibly large file by breaking it into chunks
|
[
"Generate",
"an",
"MD5",
"has",
"for",
"a",
"possibly",
"large",
"file",
"by",
"breaking",
"it",
"into",
"chunks"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L803-L824
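A quick usage sketch; md5_for_file accepts either a file-like object or a path string, as the AttributeError fallback shows:

import io

buf = io.BytesIO(b"hello world")
print(md5_for_file(buf))
# -> 5eb63bbbe01eeed093cb22bb8f5acdc3 (the well-known MD5 of b"hello world")

# A path string works too; the fallback reopens it in binary mode:
# print(md5_for_file("/path/to/some/large/file"))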
|
240,445
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
Cache.subcache
|
    def subcache(self, path):
        """Clone this cache, and extend the prefix"""
cache = self.clone()
cache.prefix = os.path.join(cache.prefix if cache.prefix else '', path)
return cache
|
python
|
    def subcache(self, path):
        """Clone this cache, and extend the prefix"""
cache = self.clone()
cache.prefix = os.path.join(cache.prefix if cache.prefix else '', path)
return cache
|
[
"def",
"subcache",
"(",
"self",
",",
"path",
")",
":",
"cache",
"=",
"self",
".",
"clone",
"(",
")",
"cache",
".",
"prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache",
".",
"prefix",
"if",
"cache",
".",
"prefix",
"else",
"''",
",",
"path",
")",
"return",
"cache"
] |
Clone this cache, and extend the prefix
|
[
"Clone",
"this",
"case",
"and",
"extend",
"the",
"prefix"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L214-L220
|
240,446
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
Cache.store_list
|
def store_list(self, cb=None):
"""List the cache and store it as metadata. This allows for getting the list from HTTP caches
and other types where it is not possible to traverse the tree"""
from StringIO import StringIO
import json
d = {}
for k, v in self.list().items():
if 'caches' in v:
del v['caches']
d[k] = v
strio = StringIO(json.dumps(d))
sink = self.put_stream('meta/_list.json')
copy_file_or_flo(strio, sink, cb=cb)
sink.close()
|
python
|
def store_list(self, cb=None):
"""List the cache and store it as metadata. This allows for getting the list from HTTP caches
and other types where it is not possible to traverse the tree"""
from StringIO import StringIO
import json
d = {}
for k, v in self.list().items():
if 'caches' in v:
del v['caches']
d[k] = v
strio = StringIO(json.dumps(d))
sink = self.put_stream('meta/_list.json')
copy_file_or_flo(strio, sink, cb=cb)
sink.close()
|
[
"def",
"store_list",
"(",
"self",
",",
"cb",
"=",
"None",
")",
":",
"from",
"StringIO",
"import",
"StringIO",
"import",
"json",
"d",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"list",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"'caches'",
"in",
"v",
":",
"del",
"v",
"[",
"'caches'",
"]",
"d",
"[",
"k",
"]",
"=",
"v",
"strio",
"=",
"StringIO",
"(",
"json",
".",
"dumps",
"(",
"d",
")",
")",
"sink",
"=",
"self",
".",
"put_stream",
"(",
"'meta/_list.json'",
")",
"copy_file_or_flo",
"(",
"strio",
",",
"sink",
",",
"cb",
"=",
"cb",
")",
"sink",
".",
"close",
"(",
")"
] |
List the cache and store it as metadata. This allows for getting the list from HTTP caches
and other types where it is not possible to traverse the tree
|
[
"List",
"the",
"cache",
"and",
"store",
"it",
"as",
"metadata",
".",
"This",
"allows",
"for",
"getting",
"the",
"list",
"from",
"HTTP",
"caches",
"and",
"other",
"types",
"where",
"it",
"is",
"not",
"possible",
"to",
"traverse",
"the",
"tree"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L327-L348
|
240,447
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
Cache.attach
|
def attach(self, upstream):
"""Attach an upstream to the last upstream. Can be removed with detach"""
if upstream == self.last_upstream():
raise Exception("Can't attach a cache to itself")
self._prior_upstreams.append(self.last_upstream())
self.last_upstream().upstream = upstream
|
python
|
def attach(self, upstream):
"""Attach an upstream to the last upstream. Can be removed with detach"""
if upstream == self.last_upstream():
raise Exception("Can't attach a cache to itself")
self._prior_upstreams.append(self.last_upstream())
self.last_upstream().upstream = upstream
|
[
"def",
"attach",
"(",
"self",
",",
"upstream",
")",
":",
"if",
"upstream",
"==",
"self",
".",
"last_upstream",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"Can't attach a cache to itself\"",
")",
"self",
".",
"_prior_upstreams",
".",
"append",
"(",
"self",
".",
"last_upstream",
"(",
")",
")",
"self",
".",
"last_upstream",
"(",
")",
".",
"upstream",
"=",
"upstream"
] |
Attach an upstream to the last upstream. Can be removed with detach
|
[
"Attach",
"an",
"upstream",
"to",
"the",
"last",
"upstream",
".",
"Can",
"be",
"removed",
"with",
"detach"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L350-L358
|
240,448
|
CivicSpleen/ckcache
|
ckcache/__init__.py
|
Cache.get_upstream
|
def get_upstream(self, type_):
'''Return self, or an upstream, that has the given class type.
        This is typically used to find upstreams that implement the RemoteInterface
'''
if isinstance(self, type_):
return self
elif self.upstream and isinstance(self.upstream, type_):
return self.upstream
elif self.upstream:
return self.upstream.get_upstream(type_)
else:
return None
|
python
|
def get_upstream(self, type_):
'''Return self, or an upstream, that has the given class type.
        This is typically used to find upstreams that implement the RemoteInterface
'''
if isinstance(self, type_):
return self
elif self.upstream and isinstance(self.upstream, type_):
return self.upstream
elif self.upstream:
return self.upstream.get_upstream(type_)
else:
return None
|
[
"def",
"get_upstream",
"(",
"self",
",",
"type_",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"type_",
")",
":",
"return",
"self",
"elif",
"self",
".",
"upstream",
"and",
"isinstance",
"(",
"self",
".",
"upstream",
",",
"type_",
")",
":",
"return",
"self",
".",
"upstream",
"elif",
"self",
".",
"upstream",
":",
"return",
"self",
".",
"upstream",
".",
"get_upstream",
"(",
"type_",
")",
"else",
":",
"return",
"None"
] |
Return self, or an upstream, that has the given class type.
This is typically used to find upstreams that implement the RemoteInterface
|
[
"Return",
"self",
"or",
"an",
"upstream",
"that",
"has",
"the",
"given",
"class",
"type",
".",
"This",
"is",
"typically",
"used",
"to",
"find",
"upstream",
"s",
"that",
"impoement",
"the",
"RemoteInterface"
] |
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
|
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L373-L385
|
240,449
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SmartDecoder.decode
|
def decode(self, a_bytes, encoding):
"""A 'try as much as we can' strategy decoding method.
'try as much as we can' feature:
        Sometimes most of the bytes are encoded correctly, so chardet is able to
        detect the encoding. But sometimes some bytes in the middle are not
        encoded correctly, so it is still not possible to apply
bytes.decode("encoding-method")
Example::
b"82347912350898143059043958290345" # 3059 is not right.
# [-----Good----][-Bad][---Good---]
        What we do is drop those badly encoded bytes and try to recover as much
        text as possible. So this method recursively calls itself, tries to
        decode the good byte chunks, and finally concatenates them together.
        :param a_bytes: the bytes whose encoding is unknown.
:type a_bytes: bytes
:param encoding: how you gonna decode a_bytes
:type encoding: str
"""
try:
return (a_bytes.decode(encoding), encoding)
except Exception as e:
ind = self.catch_position_in_UnicodeDecodeError_message(str(e))
return (a_bytes[:ind].decode(encoding) + self.decode(a_bytes[(ind + 2):], encoding)[0],
encoding)
|
python
|
def decode(self, a_bytes, encoding):
"""A 'try as much as we can' strategy decoding method.
'try as much as we can' feature:
        Sometimes most of the bytes are encoded correctly, so chardet is able to
        detect the encoding. But sometimes some bytes in the middle are not
        encoded correctly, so it is still not possible to apply
bytes.decode("encoding-method")
Example::
b"82347912350898143059043958290345" # 3059 is not right.
# [-----Good----][-Bad][---Good---]
        What we do is drop those badly encoded bytes and try to recover as much
        text as possible. So this method recursively calls itself, tries to
        decode the good byte chunks, and finally concatenates them together.
        :param a_bytes: the bytes whose encoding is unknown.
:type a_bytes: bytes
:param encoding: how you gonna decode a_bytes
:type encoding: str
"""
try:
return (a_bytes.decode(encoding), encoding)
except Exception as e:
ind = self.catch_position_in_UnicodeDecodeError_message(str(e))
return (a_bytes[:ind].decode(encoding) + self.decode(a_bytes[(ind + 2):], encoding)[0],
encoding)
|
[
"def",
"decode",
"(",
"self",
",",
"a_bytes",
",",
"encoding",
")",
":",
"try",
":",
"return",
"(",
"a_bytes",
".",
"decode",
"(",
"encoding",
")",
",",
"encoding",
")",
"except",
"Exception",
"as",
"e",
":",
"ind",
"=",
"self",
".",
"catch_position_in_UnicodeDecodeError_message",
"(",
"str",
"(",
"e",
")",
")",
"return",
"(",
"a_bytes",
"[",
":",
"ind",
"]",
".",
"decode",
"(",
"encoding",
")",
"+",
"self",
".",
"decode",
"(",
"a_bytes",
"[",
"(",
"ind",
"+",
"2",
")",
":",
"]",
",",
"encoding",
")",
"[",
"0",
"]",
",",
"encoding",
")"
] |
A 'try as much as we can' strategy decoding method.
'try as much as we can' feature:
Sometimes most of the bytes are encoded correctly, so chardet is able to
detect the encoding. But sometimes some bytes in the middle are not
encoded correctly, so it is still not possible to apply
bytes.decode("encoding-method")
Example::
b"82347912350898143059043958290345" # 3059 is not right.
# [-----Good----][-Bad][---Good---]
What we do is drop those badly encoded bytes and try to recover as much
text as possible. So this method recursively calls itself, tries to
decode the good byte chunks, and finally concatenates them together.
:param a_bytes: the bytes whose encoding is unknown.
:type a_bytes: bytes
:param encoding: how you gonna decode a_bytes
:type encoding: str
|
[
"A",
"try",
"as",
"much",
"as",
"we",
"can",
"strategy",
"decoding",
"method",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L62-L91
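The recovery strategy described in the docstring can also be written without parsing the exception message, because UnicodeDecodeError exposes the .start and .end offsets of the offending bytes. A standalone sketch of the same "decode what we can, skip what we cannot" idea (not the class above):

def lossy_decode(data, encoding):
    # decode data, dropping byte ranges that fail, and join the clean pieces
    pieces = []
    while data:
        try:
            pieces.append(data.decode(encoding))
            break
        except UnicodeDecodeError as e:
            pieces.append(data[:e.start].decode(encoding))
            data = data[e.end:]
    return "".join(pieces)

print(lossy_decode(b"abc\xff\xfedef", "utf-8"))
# -> 'abcdef'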
|
240,450
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SmartDecoder.autodecode
|
def autodecode(self, a_bytes):
"""Automatically detect encoding, and decode bytes.
"""
        try: # if chardet is installed
analysis = chardet.detect(a_bytes)
            if analysis["confidence"] >= 0.75: # if the detection is trustworthy
return (self.decode(a_bytes, analysis["encoding"])[0],
analysis["encoding"])
            else: # if not trustworthy, raise an exception
raise Exception("Failed to detect encoding. (%s, %s)" % (
analysis["confidence"],
analysis["encoding"]))
        except NameError: # if chardet is not installed
print(
"Warning! chardet not found. Use utf-8 as default encoding instead.")
return (a_bytes.decode("utf-8")[0],
"utf-8")
|
python
|
def autodecode(self, a_bytes):
"""Automatically detect encoding, and decode bytes.
"""
        try: # if chardet is installed
analysis = chardet.detect(a_bytes)
            if analysis["confidence"] >= 0.75: # if the detection is trustworthy
return (self.decode(a_bytes, analysis["encoding"])[0],
analysis["encoding"])
            else: # if not trustworthy, raise an exception
raise Exception("Failed to detect encoding. (%s, %s)" % (
analysis["confidence"],
analysis["encoding"]))
        except NameError: # if chardet is not installed
print(
"Warning! chardet not found. Use utf-8 as default encoding instead.")
return (a_bytes.decode("utf-8")[0],
"utf-8")
|
[
"def",
"autodecode",
"(",
"self",
",",
"a_bytes",
")",
":",
"try",
":",
"# 如果装了chardet",
"analysis",
"=",
"chardet",
".",
"detect",
"(",
"a_bytes",
")",
"if",
"analysis",
"[",
"\"confidence\"",
"]",
">=",
"0.75",
":",
"# 如果可信",
"return",
"(",
"self",
".",
"decode",
"(",
"a_bytes",
",",
"analysis",
"[",
"\"encoding\"",
"]",
")",
"[",
"0",
"]",
",",
"analysis",
"[",
"\"encoding\"",
"]",
")",
"else",
":",
"# 如果不可信, 打印异常",
"raise",
"Exception",
"(",
"\"Failed to detect encoding. (%s, %s)\"",
"%",
"(",
"analysis",
"[",
"\"confidence\"",
"]",
",",
"analysis",
"[",
"\"encoding\"",
"]",
")",
")",
"except",
"NameError",
":",
"# 如果没有装chardet",
"print",
"(",
"\"Warning! chardet not found. Use utf-8 as default encoding instead.\"",
")",
"return",
"(",
"a_bytes",
".",
"decode",
"(",
"\"utf-8\"",
")",
"[",
"0",
"]",
",",
"\"utf-8\"",
")"
] |
Automatically detect encoding, and decode bytes.
|
[
"Automatically",
"detect",
"encoding",
"and",
"decode",
"bytes",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L93-L109
|
240,451
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SimpleCrawler.login
|
    def login(self, url, payload):
        """Perform log in.
url is the login page url, for example:
https://login.secureserver.net/index.php?
payload includes the account and password for example:
``{"loginlist": "YourAccount", "password": "YourPassword"}``
"""
self.auth = requests.Session()
try:
self.auth.post(url, data=payload, timeout=self.default_timeout)
print("successfully logged in to %s" % url)
return True
except:
return False
|
python
|
    def login(self, url, payload):
        """Perform log in.
url is the login page url, for example:
https://login.secureserver.net/index.php?
payload includes the account and password for example:
``{"loginlist": "YourAccount", "password": "YourPassword"}``
"""
self.auth = requests.Session()
try:
self.auth.post(url, data=payload, timeout=self.default_timeout)
print("successfully logged in to %s" % url)
return True
except:
return False
|
[
"def",
"login",
"(",
"self",
",",
"url",
",",
"payload",
")",
":",
"self",
".",
"auth",
"=",
"requests",
".",
"Session",
"(",
")",
"try",
":",
"self",
".",
"auth",
".",
"post",
"(",
"url",
",",
"data",
"=",
"payload",
",",
"timeout",
"=",
"self",
".",
"default_timeout",
")",
"print",
"(",
"\"successfully logged in to %s\"",
"%",
"url",
")",
"return",
"True",
"except",
":",
"return",
"False"
] |
Perform log in.
url is the login page url, for example:
https://login.secureserver.net/index.php?
payload includes the account and password for example:
``{"loginlist": "YourAccount", "password": "YourPassword"}``
|
[
"Performe",
"log",
"in",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L149-L164
|
240,452
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SimpleCrawler.get_response
|
def get_response(self, url, timeout=None):
"""Return http request response.
"""
if not timeout:
timeout = self.default_timeout
if self.default_sleeptime:
time.sleep(self.default_sleeptime)
try:
return self.auth.get(url, headers=self.default_header, timeout=self.default_timeout)
except:
return None
|
python
|
def get_response(self, url, timeout=None):
"""Return http request response.
"""
if not timeout:
timeout = self.default_timeout
if self.default_sleeptime:
time.sleep(self.default_sleeptime)
try:
return self.auth.get(url, headers=self.default_header, timeout=self.default_timeout)
except:
return None
|
[
"def",
"get_response",
"(",
"self",
",",
"url",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"timeout",
":",
"timeout",
"=",
"self",
".",
"default_timeout",
"if",
"self",
".",
"default_sleeptime",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"default_sleeptime",
")",
"try",
":",
"return",
"self",
".",
"auth",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"default_header",
",",
"timeout",
"=",
"self",
".",
"default_timeout",
")",
"except",
":",
"return",
"None"
] |
Return http request response.
|
[
"Return",
"http",
"request",
"response",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L171-L183
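Note that get_response computes a local timeout but still passes self.default_timeout to auth.get, so the per-call timeout argument never takes effect. A corrected sketch (an editorial suggestion, not the repository's code) would be:

    def get_response(self, url, timeout=None):
        """Return http request response."""
        if not timeout:
            timeout = self.default_timeout
        if self.default_sleeptime:
            time.sleep(self.default_sleeptime)
        try:
            return self.auth.get(url, headers=self.default_header, timeout=timeout)
        except Exception:
            return None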
|
240,453
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SimpleCrawler.html_with_encoding
|
def html_with_encoding(self, url, timeout=None, encoding="utf-8"):
"""Manually get html with user encoding setting.
"""
response = self.get_response(url, timeout=timeout)
if response:
return self.decoder.decode(response.content, encoding)[0]
else:
return None
|
python
|
def html_with_encoding(self, url, timeout=None, encoding="utf-8"):
"""Manually get html with user encoding setting.
"""
response = self.get_response(url, timeout=timeout)
if response:
return self.decoder.decode(response.content, encoding)[0]
else:
return None
|
[
"def",
"html_with_encoding",
"(",
"self",
",",
"url",
",",
"timeout",
"=",
"None",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"response",
"=",
"self",
".",
"get_response",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"if",
"response",
":",
"return",
"self",
".",
"decoder",
".",
"decode",
"(",
"response",
".",
"content",
",",
"encoding",
")",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] |
Manually get html with user encoding setting.
|
[
"Manually",
"get",
"html",
"with",
"user",
"encoding",
"setting",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L185-L192
|
240,454
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SimpleCrawler.html
|
def html(self, url, timeout=None):
"""High level method to get http request response in text.
smartly handle the encoding problem.
"""
response = self.get_response(url, timeout=timeout)
if response:
domain = self.get_domain(url)
if domain in self.domain_encoding_map: # domain have been visited
try: # apply extreme decoding
html = self.decoder.decode(response.content,
self.domain_encoding_map[domain])[0]
return html
except Exception as e:
print(e)
return None
else: # never visit this domain
try:
html, encoding = self.decoder.autodecode(response.content)
# save chardet analysis result
self.domain_encoding_map[domain] = encoding
return html
except Exception as e:
print(e)
return None
else:
return None
|
python
|
def html(self, url, timeout=None):
"""High level method to get http request response in text.
smartly handle the encoding problem.
"""
response = self.get_response(url, timeout=timeout)
if response:
domain = self.get_domain(url)
if domain in self.domain_encoding_map: # domain have been visited
try: # apply extreme decoding
html = self.decoder.decode(response.content,
self.domain_encoding_map[domain])[0]
return html
except Exception as e:
print(e)
return None
else: # never visit this domain
try:
html, encoding = self.decoder.autodecode(response.content)
# save chardet analysis result
self.domain_encoding_map[domain] = encoding
return html
except Exception as e:
print(e)
return None
else:
return None
|
[
"def",
"html",
"(",
"self",
",",
"url",
",",
"timeout",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"get_response",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"if",
"response",
":",
"domain",
"=",
"self",
".",
"get_domain",
"(",
"url",
")",
"if",
"domain",
"in",
"self",
".",
"domain_encoding_map",
":",
"# domain have been visited",
"try",
":",
"# apply extreme decoding",
"html",
"=",
"self",
".",
"decoder",
".",
"decode",
"(",
"response",
".",
"content",
",",
"self",
".",
"domain_encoding_map",
"[",
"domain",
"]",
")",
"[",
"0",
"]",
"return",
"html",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"None",
"else",
":",
"# never visit this domain",
"try",
":",
"html",
",",
"encoding",
"=",
"self",
".",
"decoder",
".",
"autodecode",
"(",
"response",
".",
"content",
")",
"# save chardet analysis result",
"self",
".",
"domain_encoding_map",
"[",
"domain",
"]",
"=",
"encoding",
"return",
"html",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"None",
"else",
":",
"return",
"None"
] |
High level method to get http request response in text.
smartly handle the encoding problem.
|
[
"High",
"level",
"method",
"to",
"get",
"http",
"request",
"response",
"in",
"text",
".",
"smartly",
"handle",
"the",
"encoding",
"problem",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L194-L219
|
240,455
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SimpleCrawler.binary
|
def binary(self, url, timeout=None):
"""High level method to get http request response in bytes.
"""
response = self.get_response(url, timeout=timeout)
if response:
return response.content
else:
return None
|
python
|
def binary(self, url, timeout=None):
"""High level method to get http request response in bytes.
"""
response = self.get_response(url, timeout=timeout)
if response:
return response.content
else:
return None
|
[
"def",
"binary",
"(",
"self",
",",
"url",
",",
"timeout",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"get_response",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"if",
"response",
":",
"return",
"response",
".",
"content",
"else",
":",
"return",
"None"
] |
High level method to get http request response in bytes.
|
[
"High",
"level",
"method",
"to",
"get",
"http",
"request",
"response",
"in",
"bytes",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L221-L228
|
240,456
|
MacHu-GWU/angora-project
|
angora/crawler/simplecrawler.py
|
SimpleCrawler.download
|
    def download(self, url, dst, timeout=None):
        """Download the binary file at url to destination path.
"""
response = self.get_response(url, timeout=timeout)
if response:
with open(dst, "wb") as f:
for block in response.iter_content(1024):
if not block:
break
f.write(block)
|
python
|
    def download(self, url, dst, timeout=None):
        """Download the binary file at url to destination path.
"""
response = self.get_response(url, timeout=timeout)
if response:
with open(dst, "wb") as f:
for block in response.iter_content(1024):
if not block:
break
f.write(block)
|
[
"def",
"download",
"(",
"self",
",",
"url",
",",
"dst",
",",
"timeout",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"get_response",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"if",
"response",
":",
"with",
"open",
"(",
"dst",
",",
"\"wb\"",
")",
"as",
"f",
":",
"for",
"block",
"in",
"response",
".",
"iter_content",
"(",
"1024",
")",
":",
"if",
"not",
"block",
":",
"break",
"f",
".",
"write",
"(",
"block",
")"
] |
Download the binary file at url to destination path.
|
[
"Download",
"the",
"binary",
"file",
"at",
"url",
"to",
"distination",
"path",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L230-L239
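A usage sketch; it assumes a SimpleCrawler instance whose session is already set up (the constructor signature is not shown in this excerpt, and the URL is illustrative). The response is streamed to disk in 1024-byte blocks:

crawler = SimpleCrawler()  # assumed: can be constructed with no arguments
crawler.download("https://example.com/archive.zip", "archive.zip", timeout=60)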
|
240,457
|
za-creature/gulpless
|
gulpless/handlers.py
|
Handler.changed
|
def changed(self, src, path, dest):
"""Called whenever `path` is changed in the source folder `src`. `dest`
is the output folder. The default implementation calls `build` after
determining that the input file is newer than any of the outputs, or
any of the outputs does not exist."""
try:
mtime = os.path.getmtime(os.path.join(src, path))
self._build(src, path, dest, mtime)
except EnvironmentError as e:
logging.error("{0} is inaccessible: {1}".format(
termcolor.colored(path, "yellow", attrs=["bold"]),
e.args[0]
))
|
python
|
def changed(self, src, path, dest):
"""Called whenever `path` is changed in the source folder `src`. `dest`
is the output folder. The default implementation calls `build` after
determining that the input file is newer than any of the outputs, or
any of the outputs does not exist."""
try:
mtime = os.path.getmtime(os.path.join(src, path))
self._build(src, path, dest, mtime)
except EnvironmentError as e:
logging.error("{0} is inaccessible: {1}".format(
termcolor.colored(path, "yellow", attrs=["bold"]),
e.args[0]
))
|
[
"def",
"changed",
"(",
"self",
",",
"src",
",",
"path",
",",
"dest",
")",
":",
"try",
":",
"mtime",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"path",
")",
")",
"self",
".",
"_build",
"(",
"src",
",",
"path",
",",
"dest",
",",
"mtime",
")",
"except",
"EnvironmentError",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"\"{0} is inaccessible: {1}\"",
".",
"format",
"(",
"termcolor",
".",
"colored",
"(",
"path",
",",
"\"yellow\"",
",",
"attrs",
"=",
"[",
"\"bold\"",
"]",
")",
",",
"e",
".",
"args",
"[",
"0",
"]",
")",
")"
] |
Called whenever `path` is changed in the source folder `src`. `dest`
is the output folder. The default implementation calls `build` after
determining that the input file is newer than any of the outputs, or
any of the outputs does not exist.
|
[
"Called",
"whenever",
"path",
"is",
"changed",
"in",
"the",
"source",
"folder",
"src",
".",
"dest",
"is",
"the",
"output",
"folder",
".",
"The",
"default",
"implementation",
"calls",
"build",
"after",
"determining",
"that",
"the",
"input",
"file",
"is",
"newer",
"than",
"any",
"of",
"the",
"outputs",
"or",
"any",
"of",
"the",
"outputs",
"does",
"not",
"exist",
"."
] |
fd73907dbe86880086719816bb042233f85121f6
|
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L58-L70
|
240,458
|
za-creature/gulpless
|
gulpless/handlers.py
|
Handler.build
|
def build(self, input_path, output_paths):
"""Should be extended by subclasses to actually do stuff. By default
this will copy `input` over every file in the `outputs` list."""
for output in output_paths:
shutil.copy(input_path, output_paths)
|
python
|
def build(self, input_path, output_paths):
"""Should be extended by subclasses to actually do stuff. By default
this will copy `input` over every file in the `outputs` list."""
for output in output_paths:
shutil.copy(input_path, output_paths)
|
[
"def",
"build",
"(",
"self",
",",
"input_path",
",",
"output_paths",
")",
":",
"for",
"output",
"in",
"output_paths",
":",
"shutil",
".",
"copy",
"(",
"input_path",
",",
"output_paths",
")"
] |
Should be extended by subclasses to actually do stuff. By default
this will copy `input` over every file in the `outputs` list.
|
[
"Should",
"be",
"extended",
"by",
"subclasses",
"to",
"actually",
"do",
"stuff",
".",
"By",
"default",
"this",
"will",
"copy",
"input",
"over",
"every",
"file",
"in",
"the",
"outputs",
"list",
"."
] |
fd73907dbe86880086719816bb042233f85121f6
|
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L128-L132
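Note that the loop body above never uses its loop variable: shutil.copy(input_path, output_paths) passes the whole list as the destination, which fails at runtime. The behaviour described by the docstring (copy the input over each output path) would read, as an editorial suggestion rather than the repository's code:

    def build(self, input_path, output_paths):
        """Should be extended by subclasses to actually do stuff. By default
        this will copy `input_path` over every file in the `output_paths` list."""
        for output in output_paths:
            shutil.copy(input_path, output)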
|
240,459
|
za-creature/gulpless
|
gulpless/handlers.py
|
TreeHandler.rebuild_references
|
def rebuild_references(self, src, path, reject=None):
"""Updates `parents` and `children` to be in sync with the changes to
`src` if any."""
if reject is None:
reject = set()
reject.add(path)
try:
filename = os.path.join(src, path)
mtime = os.path.getmtime(filename)
contents = open(filename)
except EnvironmentError:
raise ValueError("Unable to open '{0}'".format(path))
if \
path in self.parents and \
self.parents[path].updated == mtime:
# cache hit; no need to update
return
# drop existing references
if path in self.parents:
self.deleted(src, path)
# build a list of parents
parents = TimedSet(mtime)
current = os.path.dirname(path)
for line in contents:
match = self.line_regex.search(line)
if match:
parent = match.group(1)
relative = os.path.normpath(os.path.join(current, parent))
if relative.startswith(".."):
raise ValueError("Parent reference '{0}' outside of "
"watched folder in '{1}'".format(parent,
path))
parent = os.path.normcase(relative)
if parent in reject:
raise ValueError("Circular reference to '{0}' "
"detected in '{1}'".format(parent,
path))
parents.add(parent)
for parent in parents:
# recursively build references for all parents; this will
# usually be a cache hit and no-op
self.rebuild_references(src, parent, reject)
self.parents[path] = parents
for parent in parents:
# add this node to each of its parents' children
if parent not in self.children:
self.children[parent] = set()
self.children[parent].add(path)
|
python
|
def rebuild_references(self, src, path, reject=None):
"""Updates `parents` and `children` to be in sync with the changes to
`src` if any."""
if reject is None:
reject = set()
reject.add(path)
try:
filename = os.path.join(src, path)
mtime = os.path.getmtime(filename)
contents = open(filename)
except EnvironmentError:
raise ValueError("Unable to open '{0}'".format(path))
if \
path in self.parents and \
self.parents[path].updated == mtime:
# cache hit; no need to update
return
# drop existing references
if path in self.parents:
self.deleted(src, path)
# build a list of parents
parents = TimedSet(mtime)
current = os.path.dirname(path)
for line in contents:
match = self.line_regex.search(line)
if match:
parent = match.group(1)
relative = os.path.normpath(os.path.join(current, parent))
if relative.startswith(".."):
raise ValueError("Parent reference '{0}' outside of "
"watched folder in '{1}'".format(parent,
path))
parent = os.path.normcase(relative)
if parent in reject:
raise ValueError("Circular reference to '{0}' "
"detected in '{1}'".format(parent,
path))
parents.add(parent)
for parent in parents:
# recursively build references for all parents; this will
# usually be a cache hit and no-op
self.rebuild_references(src, parent, reject)
self.parents[path] = parents
for parent in parents:
# add this node to each of its parents' children
if parent not in self.children:
self.children[parent] = set()
self.children[parent].add(path)
|
[
"def",
"rebuild_references",
"(",
"self",
",",
"src",
",",
"path",
",",
"reject",
"=",
"None",
")",
":",
"if",
"reject",
"is",
"None",
":",
"reject",
"=",
"set",
"(",
")",
"reject",
".",
"add",
"(",
"path",
")",
"try",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"path",
")",
"mtime",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"filename",
")",
"contents",
"=",
"open",
"(",
"filename",
")",
"except",
"EnvironmentError",
":",
"raise",
"ValueError",
"(",
"\"Unable to open '{0}'\"",
".",
"format",
"(",
"path",
")",
")",
"if",
"path",
"in",
"self",
".",
"parents",
"and",
"self",
".",
"parents",
"[",
"path",
"]",
".",
"updated",
"==",
"mtime",
":",
"# cache hit; no need to update",
"return",
"# drop existing references",
"if",
"path",
"in",
"self",
".",
"parents",
":",
"self",
".",
"deleted",
"(",
"src",
",",
"path",
")",
"# build a list of parents",
"parents",
"=",
"TimedSet",
"(",
"mtime",
")",
"current",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"for",
"line",
"in",
"contents",
":",
"match",
"=",
"self",
".",
"line_regex",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"parent",
"=",
"match",
".",
"group",
"(",
"1",
")",
"relative",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current",
",",
"parent",
")",
")",
"if",
"relative",
".",
"startswith",
"(",
"\"..\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Parent reference '{0}' outside of \"",
"\"watched folder in '{1}'\"",
".",
"format",
"(",
"parent",
",",
"path",
")",
")",
"parent",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"relative",
")",
"if",
"parent",
"in",
"reject",
":",
"raise",
"ValueError",
"(",
"\"Circular reference to '{0}' \"",
"\"detected in '{1}'\"",
".",
"format",
"(",
"parent",
",",
"path",
")",
")",
"parents",
".",
"add",
"(",
"parent",
")",
"for",
"parent",
"in",
"parents",
":",
"# recursively build references for all parents; this will",
"# usually be a cache hit and no-op",
"self",
".",
"rebuild_references",
"(",
"src",
",",
"parent",
",",
"reject",
")",
"self",
".",
"parents",
"[",
"path",
"]",
"=",
"parents",
"for",
"parent",
"in",
"parents",
":",
"# add this node to each of its parents' children",
"if",
"parent",
"not",
"in",
"self",
".",
"children",
":",
"self",
".",
"children",
"[",
"parent",
"]",
"=",
"set",
"(",
")",
"self",
".",
"children",
"[",
"parent",
"]",
".",
"add",
"(",
"path",
")"
] |
Updates `parents` and `children` to be in sync with the changes to
`src` if any.
|
[
"Updates",
"parents",
"and",
"children",
"to",
"be",
"in",
"sync",
"with",
"the",
"changes",
"to",
"src",
"if",
"any",
"."
] |
fd73907dbe86880086719816bb042233f85121f6
|
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L150-L204
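rebuild_references maintains two maps: parents[path], a TimedSet of the files that path references (stamped with path's mtime so unchanged files are a cache hit), and children[parent], the set of files that reference parent. A tiny standalone sketch of that bidirectional index, leaving out the mtime caching and the repository's TimedSet class:

parents = {}   # path -> set of files it references
children = {}  # path -> set of files that reference it

def add_reference(path, parent):
    parents.setdefault(path, set()).add(parent)
    children.setdefault(parent, set()).add(path)

add_reference("pages/index.html", "layout/base.html")
add_reference("pages/about.html", "layout/base.html")
print(children["layout/base.html"])
# -> a set containing 'pages/index.html' and 'pages/about.html'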
|
240,460
|
za-creature/gulpless
|
gulpless/handlers.py
|
TreeHandler.deleted
|
def deleted(self, src, path):
"""Update the reference tree when a handled file is deleted."""
if self.parents[path] is not None:
for parent in self.parents[path]:
self.children[parent].remove(path)
if not self.children[parent]:
del self.children[parent]
del self.parents[path]
|
python
|
def deleted(self, src, path):
"""Update the reference tree when a handled file is deleted."""
if self.parents[path] is not None:
for parent in self.parents[path]:
self.children[parent].remove(path)
if not self.children[parent]:
del self.children[parent]
del self.parents[path]
|
[
"def",
"deleted",
"(",
"self",
",",
"src",
",",
"path",
")",
":",
"if",
"self",
".",
"parents",
"[",
"path",
"]",
"is",
"not",
"None",
":",
"for",
"parent",
"in",
"self",
".",
"parents",
"[",
"path",
"]",
":",
"self",
".",
"children",
"[",
"parent",
"]",
".",
"remove",
"(",
"path",
")",
"if",
"not",
"self",
".",
"children",
"[",
"parent",
"]",
":",
"del",
"self",
".",
"children",
"[",
"parent",
"]",
"del",
"self",
".",
"parents",
"[",
"path",
"]"
] |
Update the reference tree when a handled file is deleted.
|
[
"Update",
"the",
"reference",
"tree",
"when",
"a",
"handled",
"file",
"is",
"deleted",
"."
] |
fd73907dbe86880086719816bb042233f85121f6
|
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L233-L240
|
240,461
|
kyan001/PyKyanToolKit
|
KyanToolKit.py
|
KyanToolKit.clearScreen
|
def clearScreen(cls):
"""Clear the screen"""
if "win32" in sys.platform:
os.system('cls')
elif "linux" in sys.platform:
os.system('clear')
elif 'darwin' in sys.platform:
os.system('clear')
else:
cit.err("No clearScreen for " + sys.platform)
|
python
|
def clearScreen(cls):
"""Clear the screen"""
if "win32" in sys.platform:
os.system('cls')
elif "linux" in sys.platform:
os.system('clear')
elif 'darwin' in sys.platform:
os.system('clear')
else:
cit.err("No clearScreen for " + sys.platform)
|
[
"def",
"clearScreen",
"(",
"cls",
")",
":",
"if",
"\"win32\"",
"in",
"sys",
".",
"platform",
":",
"os",
".",
"system",
"(",
"'cls'",
")",
"elif",
"\"linux\"",
"in",
"sys",
".",
"platform",
":",
"os",
".",
"system",
"(",
"'clear'",
")",
"elif",
"'darwin'",
"in",
"sys",
".",
"platform",
":",
"os",
".",
"system",
"(",
"'clear'",
")",
"else",
":",
"cit",
".",
"err",
"(",
"\"No clearScreen for \"",
"+",
"sys",
".",
"platform",
")"
] |
Clear the screen
|
[
"Clear",
"the",
"screen"
] |
a3974fcd45ce41f743b4a3d42af961fedea8fda8
|
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L98-L107
|
240,462
|
kyan001/PyKyanToolKit
|
KyanToolKit.py
|
KyanToolKit.getPyCmd
|
def getPyCmd(cls):
"""get OS's python command"""
if "win32" in sys.platform:
return 'py'
elif "linux" in sys.platform:
return 'python3'
elif 'darwin' in sys.platform:
return 'python3'
else:
cit.err("No python3 command for " + sys.platform)
|
python
|
def getPyCmd(cls):
"""get OS's python command"""
if "win32" in sys.platform:
return 'py'
elif "linux" in sys.platform:
return 'python3'
elif 'darwin' in sys.platform:
return 'python3'
else:
cit.err("No python3 command for " + sys.platform)
|
[
"def",
"getPyCmd",
"(",
"cls",
")",
":",
"if",
"\"win32\"",
"in",
"sys",
".",
"platform",
":",
"return",
"'py'",
"elif",
"\"linux\"",
"in",
"sys",
".",
"platform",
":",
"return",
"'python3'",
"elif",
"'darwin'",
"in",
"sys",
".",
"platform",
":",
"return",
"'python3'",
"else",
":",
"cit",
".",
"err",
"(",
"\"No python3 command for \"",
"+",
"sys",
".",
"platform",
")"
] |
get OS's python command
|
[
"get",
"OS",
"s",
"python",
"command"
] |
a3974fcd45ce41f743b4a3d42af961fedea8fda8
|
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L110-L119
|
240,463
|
kyan001/PyKyanToolKit
|
KyanToolKit.py
|
KyanToolKit.runCmd
|
def runCmd(cls, cmd):
"""run command and show if success or failed
Args:
cmd: string
Returns:
bool: if this command run successfully
"""
cit.echo(cmd, "command")
result = os.system(cmd)
cls.checkResult(result)
|
python
|
def runCmd(cls, cmd):
"""run command and show if success or failed
Args:
cmd: string
Returns:
bool: if this command run successfully
"""
cit.echo(cmd, "command")
result = os.system(cmd)
cls.checkResult(result)
|
[
"def",
"runCmd",
"(",
"cls",
",",
"cmd",
")",
":",
"cit",
".",
"echo",
"(",
"cmd",
",",
"\"command\"",
")",
"result",
"=",
"os",
".",
"system",
"(",
"cmd",
")",
"cls",
".",
"checkResult",
"(",
"result",
")"
] |
run command and show if success or failed
Args:
cmd: string
Returns:
bool: if this command run successfully
|
[
"run",
"command",
"and",
"show",
"if",
"success",
"or",
"failed"
] |
a3974fcd45ce41f743b4a3d42af961fedea8fda8
|
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L123-L133
|
240,464
|
kyan001/PyKyanToolKit
|
KyanToolKit.py
|
KyanToolKit.readCmd
|
def readCmd(cls, cmd):
"""run command and return the str format stdout
Args:
cmd: string
Returns:
str: what the command's echo
"""
args = shlex.split(cmd)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(proc_stdout, proc_stderr) = proc.communicate(input=None) # proc_stdin
return proc_stdout.decode()
|
python
|
def readCmd(cls, cmd):
"""run command and return the str format stdout
Args:
cmd: string
Returns:
str: what the command's echo
"""
args = shlex.split(cmd)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(proc_stdout, proc_stderr) = proc.communicate(input=None) # proc_stdin
return proc_stdout.decode()
|
[
"def",
"readCmd",
"(",
"cls",
",",
"cmd",
")",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"cmd",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"(",
"proc_stdout",
",",
"proc_stderr",
")",
"=",
"proc",
".",
"communicate",
"(",
"input",
"=",
"None",
")",
"# proc_stdin",
"return",
"proc_stdout",
".",
"decode",
"(",
")"
] |
run command and return the str format stdout
Args:
cmd: string
Returns:
str: what the command's echo
|
[
"run",
"command",
"and",
"return",
"the",
"str",
"format",
"stdout"
] |
a3974fcd45ce41f743b4a3d42af961fedea8fda8
|
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L137-L148
|
240,465
|
kyan001/PyKyanToolKit
|
KyanToolKit.py
|
KyanToolKit.updateFile
|
def updateFile(cls, file_, url):
"""Check and update file compares with remote_url
Args:
file_: str. Local filename. Normally it's __file__
url: str. Remote url of raw file content. Normally it's https://raw.github.com/...
Returns:
bool: file updated or not
"""
def compare(s1, s2):
return s1 == s2, len(s2) - len(s1)
if not url or not file_:
return False
try:
req = urllib.request.urlopen(url)
raw_codes = req.read()
with open(file_, 'rb') as f:
current_codes = f.read().replace(b'\r', b'')
is_same, diff = compare(current_codes, raw_codes)
if is_same:
cit.info("{} is already up-to-date.".format(file_))
return False
else:
cit.ask("A new version is available. Update? (Diff: {})".format(diff))
if cit.get_choice(['Yes', 'No']) == 'Yes':
with open(file_, 'wb') as f:
f.write(raw_codes)
cit.info("Update Success.")
return True
else:
cit.warn("Update Canceled")
return False
except Exception as e:
cit.err("{f} update failed: {e}".format(f=file_, e=e))
return False
|
python
|
def updateFile(cls, file_, url):
"""Check and update file compares with remote_url
Args:
file_: str. Local filename. Normally it's __file__
url: str. Remote url of raw file content. Normally it's https://raw.github.com/...
Returns:
bool: file updated or not
"""
def compare(s1, s2):
return s1 == s2, len(s2) - len(s1)
if not url or not file_:
return False
try:
req = urllib.request.urlopen(url)
raw_codes = req.read()
with open(file_, 'rb') as f:
current_codes = f.read().replace(b'\r', b'')
is_same, diff = compare(current_codes, raw_codes)
if is_same:
cit.info("{} is already up-to-date.".format(file_))
return False
else:
cit.ask("A new version is available. Update? (Diff: {})".format(diff))
if cit.get_choice(['Yes', 'No']) == 'Yes':
with open(file_, 'wb') as f:
f.write(raw_codes)
cit.info("Update Success.")
return True
else:
cit.warn("Update Canceled")
return False
except Exception as e:
cit.err("{f} update failed: {e}".format(f=file_, e=e))
return False
|
[
"def",
"updateFile",
"(",
"cls",
",",
"file_",
",",
"url",
")",
":",
"def",
"compare",
"(",
"s1",
",",
"s2",
")",
":",
"return",
"s1",
"==",
"s2",
",",
"len",
"(",
"s2",
")",
"-",
"len",
"(",
"s1",
")",
"if",
"not",
"url",
"or",
"not",
"file_",
":",
"return",
"False",
"try",
":",
"req",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"raw_codes",
"=",
"req",
".",
"read",
"(",
")",
"with",
"open",
"(",
"file_",
",",
"'rb'",
")",
"as",
"f",
":",
"current_codes",
"=",
"f",
".",
"read",
"(",
")",
".",
"replace",
"(",
"b'\\r'",
",",
"b''",
")",
"is_same",
",",
"diff",
"=",
"compare",
"(",
"current_codes",
",",
"raw_codes",
")",
"if",
"is_same",
":",
"cit",
".",
"info",
"(",
"\"{} is already up-to-date.\"",
".",
"format",
"(",
"file_",
")",
")",
"return",
"False",
"else",
":",
"cit",
".",
"ask",
"(",
"\"A new version is available. Update? (Diff: {})\"",
".",
"format",
"(",
"diff",
")",
")",
"if",
"cit",
".",
"get_choice",
"(",
"[",
"'Yes'",
",",
"'No'",
"]",
")",
"==",
"'Yes'",
":",
"with",
"open",
"(",
"file_",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"raw_codes",
")",
"cit",
".",
"info",
"(",
"\"Update Success.\"",
")",
"return",
"True",
"else",
":",
"cit",
".",
"warn",
"(",
"\"Update Canceled\"",
")",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"cit",
".",
"err",
"(",
"\"{f} update failed: {e}\"",
".",
"format",
"(",
"f",
"=",
"file_",
",",
"e",
"=",
"e",
")",
")",
"return",
"False"
] |
Check and update file compares with remote_url
Args:
file_: str. Local filename. Normally it's __file__
url: str. Remote url of raw file content. Normally it's https://raw.github.com/...
Returns:
bool: file updated or not
|
[
"Check",
"and",
"update",
"file",
"compares",
"with",
"remote_url"
] |
a3974fcd45ce41f743b4a3d42af961fedea8fda8
|
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L151-L186
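A minimal usage sketch for updateFile, assuming it is exposed as a classmethod (the cls argument suggests so); the raw-file URL is a placeholder, in line with the docstring's own example:

# Hypothetical call: compare this script with its raw copy on GitHub and
# interactively offer to overwrite the local file when the remote differs.
updated = KyanToolKit.updateFile(__file__, "https://raw.githubusercontent.com/<user>/<repo>/master/KyanToolKit.py")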
|
240,466
|
kyan001/PyKyanToolKit
|
KyanToolKit.py
|
KyanToolKit.ajax
|
def ajax(cls, url, param={}, method='get'):
"""Get info by ajax
Args:
url: string
Returns:
dict: json decoded into a dict
"""
param = urllib.parse.urlencode(param)
if method.lower() == 'get':
req = urllib.request.Request(url + '?' + param)
elif method.lower() == 'post':
param = param.encode('utf-8')
req = urllib.request.Request(url, data=param)
else:
raise Exception("invalid method '{}' (GET/POST)".format(method))
rsp = urllib.request.urlopen(req)
if rsp:
rsp_json = rsp.read().decode('utf-8')
rsp_dict = json.loads(rsp_json)
return rsp_dict
return None
|
python
|
def ajax(cls, url, param={}, method='get'):
"""Get info by ajax
Args:
url: string
Returns:
dict: json decoded into a dict
"""
param = urllib.parse.urlencode(param)
if method.lower() == 'get':
req = urllib.request.Request(url + '?' + param)
elif method.lower() == 'post':
param = param.encode('utf-8')
req = urllib.request.Request(url, data=param)
else:
raise Exception("invalid method '{}' (GET/POST)".format(method))
rsp = urllib.request.urlopen(req)
if rsp:
rsp_json = rsp.read().decode('utf-8')
rsp_dict = json.loads(rsp_json)
return rsp_dict
return None
|
[
"def",
"ajax",
"(",
"cls",
",",
"url",
",",
"param",
"=",
"{",
"}",
",",
"method",
"=",
"'get'",
")",
":",
"param",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"param",
")",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'get'",
":",
"req",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
"+",
"'?'",
"+",
"param",
")",
"elif",
"method",
".",
"lower",
"(",
")",
"==",
"'post'",
":",
"param",
"=",
"param",
".",
"encode",
"(",
"'utf-8'",
")",
"req",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
",",
"data",
"=",
"param",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"invalid method '{}' (GET/POST)\"",
".",
"format",
"(",
"method",
")",
")",
"rsp",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"req",
")",
"if",
"rsp",
":",
"rsp_json",
"=",
"rsp",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"rsp_dict",
"=",
"json",
".",
"loads",
"(",
"rsp_json",
")",
"return",
"rsp_dict",
"return",
"None"
] |
Get info by ajax
Args:
url: string
Returns:
dict: json decoded into a dict
|
[
"Get",
"info",
"by",
"ajax"
] |
a3974fcd45ce41f743b4a3d42af961fedea8fda8
|
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L190-L211
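A minimal usage sketch for ajax, again assuming classmethod exposure; the endpoint and parameters are placeholders:

# GET: the parameters are urlencoded into the query string; the JSON body of
# the response is decoded and returned as a dict (None when there is no response).
result = KyanToolKit.ajax("https://example.com/api/items", {"page": 1}, method="get")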
|
240,467
|
pacificclimate/cfmeta
|
cfmeta/path.py
|
get_dir_meta
|
def get_dir_meta(fp, atts):
"""Pop path information and map to supplied atts
"""
# Attibutes are popped from deepest directory first
atts.reverse()
dirname = os.path.split(fp)[0]
meta = dirname.split('/')
res = {}
try:
for key in atts:
res[key] = meta.pop()
except IndexError:
raise PathError(dirname)
return res
|
python
|
def get_dir_meta(fp, atts):
"""Pop path information and map to supplied atts
"""
# Attibutes are popped from deepest directory first
atts.reverse()
dirname = os.path.split(fp)[0]
meta = dirname.split('/')
res = {}
try:
for key in atts:
res[key] = meta.pop()
except IndexError:
raise PathError(dirname)
return res
|
[
"def",
"get_dir_meta",
"(",
"fp",
",",
"atts",
")",
":",
"# Attibutes are popped from deepest directory first",
"atts",
".",
"reverse",
"(",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"fp",
")",
"[",
"0",
"]",
"meta",
"=",
"dirname",
".",
"split",
"(",
"'/'",
")",
"res",
"=",
"{",
"}",
"try",
":",
"for",
"key",
"in",
"atts",
":",
"res",
"[",
"key",
"]",
"=",
"meta",
".",
"pop",
"(",
")",
"except",
"IndexError",
":",
"raise",
"PathError",
"(",
"dirname",
")",
"return",
"res"
] |
Pop path information and map to supplied atts
|
[
"Pop",
"path",
"information",
"and",
"map",
"to",
"supplied",
"atts"
] |
a6eef78d0bce523bb44920ba96233f034b60316a
|
https://github.com/pacificclimate/cfmeta/blob/a6eef78d0bce523bb44920ba96233f034b60316a/cfmeta/path.py#L10-L28
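A small worked example of the mapping performed by get_dir_meta (values are hypothetical); note that atts is reversed in place, so the deepest directory is assigned to the last attribute supplied:

# dirname "model1/exp1/tas" is consumed from the right: "var" gets the deepest level
meta = get_dir_meta("model1/exp1/tas/file.nc", ["model", "experiment", "var"])
# meta == {"var": "tas", "experiment": "exp1", "model": "model1"}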
|
240,468
|
krukas/Trionyx
|
trionyx/trionyx/context_processors.py
|
trionyx
|
def trionyx(request):
"""Add trionyx context data"""
return {
'TX_APP_NAME': settings.TX_APP_NAME,
'TX_LOGO_NAME_START': settings.TX_LOGO_NAME_START,
'TX_LOGO_NAME_END': settings.TX_LOGO_NAME_END,
'TX_LOGO_NAME_SMALL_START': settings.TX_LOGO_NAME_SMALL_START,
'TX_LOGO_NAME_SMALL_END': settings.TX_LOGO_NAME_SMALL_END,
'trionyx_menu_items': app_menu.get_menu_items(),
}
|
python
|
def trionyx(request):
"""Add trionyx context data"""
return {
'TX_APP_NAME': settings.TX_APP_NAME,
'TX_LOGO_NAME_START': settings.TX_LOGO_NAME_START,
'TX_LOGO_NAME_END': settings.TX_LOGO_NAME_END,
'TX_LOGO_NAME_SMALL_START': settings.TX_LOGO_NAME_SMALL_START,
'TX_LOGO_NAME_SMALL_END': settings.TX_LOGO_NAME_SMALL_END,
'trionyx_menu_items': app_menu.get_menu_items(),
}
|
[
"def",
"trionyx",
"(",
"request",
")",
":",
"return",
"{",
"'TX_APP_NAME'",
":",
"settings",
".",
"TX_APP_NAME",
",",
"'TX_LOGO_NAME_START'",
":",
"settings",
".",
"TX_LOGO_NAME_START",
",",
"'TX_LOGO_NAME_END'",
":",
"settings",
".",
"TX_LOGO_NAME_END",
",",
"'TX_LOGO_NAME_SMALL_START'",
":",
"settings",
".",
"TX_LOGO_NAME_SMALL_START",
",",
"'TX_LOGO_NAME_SMALL_END'",
":",
"settings",
".",
"TX_LOGO_NAME_SMALL_END",
",",
"'trionyx_menu_items'",
":",
"app_menu",
".",
"get_menu_items",
"(",
")",
",",
"}"
] |
Add trionyx context data
|
[
"Add",
"trionyx",
"context",
"data"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/context_processors.py#L12-L22
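Since this is a plain Django context processor, enabling it is a settings change; a hedged sketch, with the dotted path inferred from the file path above:

TEMPLATES = [{
    # ...
    "OPTIONS": {
        "context_processors": [
            # ... default processors ...
            "trionyx.trionyx.context_processors.trionyx",
        ],
    },
}]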
|
240,469
|
funkybob/antfarm
|
antfarm/views/urls.py
|
url_dispatcher.register
|
def register(self, pattern, view=None):
'''Allow decorator-style construction of URL pattern lists.'''
if view is None:
return partial(self.register, pattern)
self.patterns.append(self._make_url((pattern, view)))
return view
|
python
|
def register(self, pattern, view=None):
'''Allow decorator-style construction of URL pattern lists.'''
if view is None:
return partial(self.register, pattern)
self.patterns.append(self._make_url((pattern, view)))
return view
|
[
"def",
"register",
"(",
"self",
",",
"pattern",
",",
"view",
"=",
"None",
")",
":",
"if",
"view",
"is",
"None",
":",
"return",
"partial",
"(",
"self",
".",
"register",
",",
"pattern",
")",
"self",
".",
"patterns",
".",
"append",
"(",
"self",
".",
"_make_url",
"(",
"(",
"pattern",
",",
"view",
")",
")",
")",
"return",
"view"
] |
Allow decorator-style construction of URL pattern lists.
|
[
"Allow",
"decorator",
"-",
"style",
"construction",
"of",
"URL",
"pattern",
"lists",
"."
] |
40a7cc450eba09a280b7bc8f7c68a807b0177c62
|
https://github.com/funkybob/antfarm/blob/40a7cc450eba09a280b7bc8f7c68a807b0177c62/antfarm/views/urls.py#L56-L61
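A hedged sketch of the decorator-style registration described above, assuming a url_dispatcher instance can be constructed without arguments (the constructor is not shown in the record):

urls = url_dispatcher()

@urls.register(r"^/hello/$")
def hello(request):
    ...
# With no view argument, register returns a partial, so the decorated function
# is appended as a URL pattern and then returned unchanged.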
|
240,470
|
krukas/Trionyx
|
trionyx/models.py
|
BaseModel.get_fields
|
def get_fields(cls, inlcude_base=False, include_id=False):
"""Get model fields"""
for field in cls._meta.fields:
if field.name == 'deleted':
continue
if not include_id and field.name == 'id':
continue
if not inlcude_base and field.name in ['created_at', 'updated_at']:
continue
yield field
|
python
|
def get_fields(cls, inlcude_base=False, include_id=False):
"""Get model fields"""
for field in cls._meta.fields:
if field.name == 'deleted':
continue
if not include_id and field.name == 'id':
continue
if not inlcude_base and field.name in ['created_at', 'updated_at']:
continue
yield field
|
[
"def",
"get_fields",
"(",
"cls",
",",
"inlcude_base",
"=",
"False",
",",
"include_id",
"=",
"False",
")",
":",
"for",
"field",
"in",
"cls",
".",
"_meta",
".",
"fields",
":",
"if",
"field",
".",
"name",
"==",
"'deleted'",
":",
"continue",
"if",
"not",
"include_id",
"and",
"field",
".",
"name",
"==",
"'id'",
":",
"continue",
"if",
"not",
"inlcude_base",
"and",
"field",
".",
"name",
"in",
"[",
"'created_at'",
",",
"'updated_at'",
"]",
":",
"continue",
"yield",
"field"
] |
Get model fields
|
[
"Get",
"model",
"fields"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/models.py#L60-L69
|
240,471
|
krukas/Trionyx
|
trionyx/models.py
|
BaseModel.get_absolute_url
|
def get_absolute_url(self):
"""Get model url"""
return reverse('trionyx:model-view', kwargs={
'app': self._meta.app_label,
'model': self._meta.model_name,
'pk': self.id
})
|
python
|
def get_absolute_url(self):
"""Get model url"""
return reverse('trionyx:model-view', kwargs={
'app': self._meta.app_label,
'model': self._meta.model_name,
'pk': self.id
})
|
[
"def",
"get_absolute_url",
"(",
"self",
")",
":",
"return",
"reverse",
"(",
"'trionyx:model-view'",
",",
"kwargs",
"=",
"{",
"'app'",
":",
"self",
".",
"_meta",
".",
"app_label",
",",
"'model'",
":",
"self",
".",
"_meta",
".",
"model_name",
",",
"'pk'",
":",
"self",
".",
"id",
"}",
")"
] |
Get model url
|
[
"Get",
"model",
"url"
] |
edac132cc0797190153f2e60bc7e88cb50e80da6
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/models.py#L78-L84
|
240,472
|
francbartoli/marmee
|
marmee/utils/parser.py
|
Stac.parse
|
def parse(self):
"""Parse an asset from Earth Engine to STAC item
Raises:
ValueError -- If asset is not of type Image or ImageCollection
Returns:
Item -- STAC feature of the Google Earth Engine Asset
Collection -- STAC collection of the Google Earth Engine Asset
"""
if self.type == TOKEN_TYPE[0][1]:
try:
return Item(
item_id=self._link(None, None)[1],
links=self._link(None, None)[0],
assets=self._asset(None),
properties=self._properties(None)[0],
geometry=self._properties(None)[2]
)
except ValidationError as e:
raise
elif self.type == TOKEN_TYPE[1][1]:
try:
# parallelize item computation
items = [self._features_iterator(
feature['id'],
self._link(feature, data.ASSET_TYPE_IMAGE_COLL)[0],
self._asset(
feature['properties']['system:index']
),
self._properties(feature)[0],
self._properties(feature)[2]
) for feature in self._get_full_info()['features']]
res_list = dask.compute(items)[0]
return Collection(
collection_id=self._get_info()['id'],
features=res_list
)
except ValidationError as e:
raise
else:
raise TypeError("Unrecognized Stac type found.")
|
python
|
def parse(self):
"""Parse an asset from Earth Engine to STAC item
Raises:
ValueError -- If asset is not of type Image or ImageCollection
Returns:
Item -- STAC feature of the Google Earth Engine Asset
Collection -- STAC collection of the Google Earth Engine Asset
"""
if self.type == TOKEN_TYPE[0][1]:
try:
return Item(
item_id=self._link(None, None)[1],
links=self._link(None, None)[0],
assets=self._asset(None),
properties=self._properties(None)[0],
geometry=self._properties(None)[2]
)
except ValidationError as e:
raise
elif self.type == TOKEN_TYPE[1][1]:
try:
# parallelize item computation
items = [self._features_iterator(
feature['id'],
self._link(feature, data.ASSET_TYPE_IMAGE_COLL)[0],
self._asset(
feature['properties']['system:index']
),
self._properties(feature)[0],
self._properties(feature)[2]
) for feature in self._get_full_info()['features']]
res_list = dask.compute(items)[0]
return Collection(
collection_id=self._get_info()['id'],
features=res_list
)
except ValidationError as e:
raise
else:
raise TypeError("Unrecognized Stac type found.")
|
[
"def",
"parse",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"==",
"TOKEN_TYPE",
"[",
"0",
"]",
"[",
"1",
"]",
":",
"try",
":",
"return",
"Item",
"(",
"item_id",
"=",
"self",
".",
"_link",
"(",
"None",
",",
"None",
")",
"[",
"1",
"]",
",",
"links",
"=",
"self",
".",
"_link",
"(",
"None",
",",
"None",
")",
"[",
"0",
"]",
",",
"assets",
"=",
"self",
".",
"_asset",
"(",
"None",
")",
",",
"properties",
"=",
"self",
".",
"_properties",
"(",
"None",
")",
"[",
"0",
"]",
",",
"geometry",
"=",
"self",
".",
"_properties",
"(",
"None",
")",
"[",
"2",
"]",
")",
"except",
"ValidationError",
"as",
"e",
":",
"raise",
"elif",
"self",
".",
"type",
"==",
"TOKEN_TYPE",
"[",
"1",
"]",
"[",
"1",
"]",
":",
"try",
":",
"# parallelize item computation",
"items",
"=",
"[",
"self",
".",
"_features_iterator",
"(",
"feature",
"[",
"'id'",
"]",
",",
"self",
".",
"_link",
"(",
"feature",
",",
"data",
".",
"ASSET_TYPE_IMAGE_COLL",
")",
"[",
"0",
"]",
",",
"self",
".",
"_asset",
"(",
"feature",
"[",
"'properties'",
"]",
"[",
"'system:index'",
"]",
")",
",",
"self",
".",
"_properties",
"(",
"feature",
")",
"[",
"0",
"]",
",",
"self",
".",
"_properties",
"(",
"feature",
")",
"[",
"2",
"]",
")",
"for",
"feature",
"in",
"self",
".",
"_get_full_info",
"(",
")",
"[",
"'features'",
"]",
"]",
"res_list",
"=",
"dask",
".",
"compute",
"(",
"items",
")",
"[",
"0",
"]",
"return",
"Collection",
"(",
"collection_id",
"=",
"self",
".",
"_get_info",
"(",
")",
"[",
"'id'",
"]",
",",
"features",
"=",
"res_list",
")",
"except",
"ValidationError",
"as",
"e",
":",
"raise",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized Stac type found.\"",
")"
] |
Parse an asset from Earth Engine to STAC item
Raises:
ValueError -- If asset is not of type Image or ImageCollection
Returns:
Item -- STAC feature of the Google Earth Engine Asset
Collection -- STAC collection of the Google Earth Engine Asset
|
[
"Parse",
"an",
"asset",
"from",
"Earth",
"Engine",
"to",
"STAC",
"item"
] |
f971adc84636629b9afbe79c917b74cac81fabf2
|
https://github.com/francbartoli/marmee/blob/f971adc84636629b9afbe79c917b74cac81fabf2/marmee/utils/parser.py#L40-L83
|
240,473
|
Stufinite/cphelper
|
cphelper/views.py
|
Genra
|
def Genra(request):
"""
Generate dict of Dept and its grade.
"""
school = request.GET['school']
c = Course(school=school)
return JsonResponse(c.getGenra(), safe=False)
|
python
|
def Genra(request):
"""
Generate dict of Dept and its grade.
"""
school = request.GET['school']
c = Course(school=school)
return JsonResponse(c.getGenra(), safe=False)
|
[
"def",
"Genra",
"(",
"request",
")",
":",
"school",
"=",
"request",
".",
"GET",
"[",
"'school'",
"]",
"c",
"=",
"Course",
"(",
"school",
"=",
"school",
")",
"return",
"JsonResponse",
"(",
"c",
".",
"getGenra",
"(",
")",
",",
"safe",
"=",
"False",
")"
] |
Generate dict of Dept and its grade.
|
[
"Generate",
"dict",
"of",
"Dept",
"and",
"its",
"grade",
"."
] |
ae70f928ae8d65f7a62ed8b8e2836bcc777c5eac
|
https://github.com/Stufinite/cphelper/blob/ae70f928ae8d65f7a62ed8b8e2836bcc777c5eac/cphelper/views.py#L59-L65
|
240,474
|
esterhui/pypu
|
pypu/service_wp.py
|
service_wp.Remove
|
def Remove(self,directory,filename):
"""Deletes post from wordpress"""
db = self._loadDB(directory)
logger.debug("wp: Attempting to remove %s from wp"%(filename))
# See if this already exists in our DB
if db.has_key(filename):
pid=db[filename]
logger.debug('wp: Found %s in DB with post id %s'%(filename,pid))
else:
print("wp: %s not in our local DB file [%s]"\
%(filename,self.DB_FILE))
return False
self._connectToWP()
self.wp.call(DeletePost(pid))
del db[filename]
self._saveDB(directory,db)
return True
|
python
|
def Remove(self,directory,filename):
"""Deletes post from wordpress"""
db = self._loadDB(directory)
logger.debug("wp: Attempting to remove %s from wp"%(filename))
# See if this already exists in our DB
if db.has_key(filename):
pid=db[filename]
logger.debug('wp: Found %s in DB with post id %s'%(filename,pid))
else:
print("wp: %s not in our local DB file [%s]"\
%(filename,self.DB_FILE))
return False
self._connectToWP()
self.wp.call(DeletePost(pid))
del db[filename]
self._saveDB(directory,db)
return True
|
[
"def",
"Remove",
"(",
"self",
",",
"directory",
",",
"filename",
")",
":",
"db",
"=",
"self",
".",
"_loadDB",
"(",
"directory",
")",
"logger",
".",
"debug",
"(",
"\"wp: Attempting to remove %s from wp\"",
"%",
"(",
"filename",
")",
")",
"# See if this already exists in our DB",
"if",
"db",
".",
"has_key",
"(",
"filename",
")",
":",
"pid",
"=",
"db",
"[",
"filename",
"]",
"logger",
".",
"debug",
"(",
"'wp: Found %s in DB with post id %s'",
"%",
"(",
"filename",
",",
"pid",
")",
")",
"else",
":",
"print",
"(",
"\"wp: %s not in our local DB file [%s]\"",
"%",
"(",
"filename",
",",
"self",
".",
"DB_FILE",
")",
")",
"return",
"False",
"self",
".",
"_connectToWP",
"(",
")",
"self",
".",
"wp",
".",
"call",
"(",
"DeletePost",
"(",
"pid",
")",
")",
"del",
"db",
"[",
"filename",
"]",
"self",
".",
"_saveDB",
"(",
"directory",
",",
"db",
")",
"return",
"True"
] |
Deletes post from wordpress
|
[
"Deletes",
"post",
"from",
"wordpress"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_wp.py#L172-L194
|
240,475
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.eq
|
def eq(self, event_property, value):
"""An equals filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.eq('path', '/')
>>> print(filtered)
request(elapsed_ms).eq(path, "/")
"""
c = self.copy()
c.filters.append(filters.EQ(event_property, value))
return c
|
python
|
def eq(self, event_property, value):
"""An equals filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.eq('path', '/')
>>> print(filtered)
request(elapsed_ms).eq(path, "/")
"""
c = self.copy()
c.filters.append(filters.EQ(event_property, value))
return c
|
[
"def",
"eq",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"EQ",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
An equals filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.eq('path', '/')
>>> print(filtered)
request(elapsed_ms).eq(path, "/")
|
[
"An",
"equals",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L238-L248
|
240,476
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.ne
|
def ne(self, event_property, value):
"""A not-equal filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.ne('path', '/')
>>> print(filtered)
request(elapsed_ms).ne(path, "/")
"""
c = self.copy()
c.filters.append(filters.NE(event_property, value))
return c
|
python
|
def ne(self, event_property, value):
"""A not-equal filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.ne('path', '/')
>>> print(filtered)
request(elapsed_ms).ne(path, "/")
"""
c = self.copy()
c.filters.append(filters.NE(event_property, value))
return c
|
[
"def",
"ne",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"NE",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
A not-equal filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.ne('path', '/')
>>> print(filtered)
request(elapsed_ms).ne(path, "/")
|
[
"A",
"not",
"-",
"equal",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L250-L260
|
240,477
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.lt
|
def lt(self, event_property, value):
"""A less-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.lt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).lt(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.LT(event_property, value))
return c
|
python
|
def lt(self, event_property, value):
"""A less-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.lt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).lt(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.LT(event_property, value))
return c
|
[
"def",
"lt",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"LT",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
A less-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.lt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).lt(elapsed_ms, 500)
|
[
"A",
"less",
"-",
"than",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L262-L272
|
240,478
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.le
|
def le(self, event_property, value):
"""A less-than-or-equal-to filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.le('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).le(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.LE(event_property, value))
return c
|
python
|
def le(self, event_property, value):
"""A less-than-or-equal-to filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.le('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).le(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.LE(event_property, value))
return c
|
[
"def",
"le",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"LE",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
A less-than-or-equal-to filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.le('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).le(elapsed_ms, 500)
|
[
"A",
"less",
"-",
"than",
"-",
"or",
"-",
"equal",
"-",
"to",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L274-L284
|
240,479
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.gt
|
def gt(self, event_property, value):
"""A greater-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.gt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).gt(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.GT(event_property, value))
return c
|
python
|
def gt(self, event_property, value):
"""A greater-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.gt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).gt(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.GT(event_property, value))
return c
|
[
"def",
"gt",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"GT",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
A greater-than filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.gt('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).gt(elapsed_ms, 500)
|
[
"A",
"greater",
"-",
"than",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L286-L296
|
240,480
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.ge
|
def ge(self, event_property, value):
"""A greater-than-or-equal-to filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.ge('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).ge(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.GE(event_property, value))
return c
|
python
|
def ge(self, event_property, value):
"""A greater-than-or-equal-to filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.ge('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).ge(elapsed_ms, 500)
"""
c = self.copy()
c.filters.append(filters.GE(event_property, value))
return c
|
[
"def",
"ge",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"GE",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
A greater-than-or-equal-to filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.ge('elapsed_ms', 500)
>>> print(filtered)
request(elapsed_ms).ge(elapsed_ms, 500)
|
[
"A",
"greater",
"-",
"than",
"-",
"or",
"-",
"equal",
"-",
"to",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L298-L308
|
240,481
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.re
|
def re(self, event_property, value):
"""A regular expression filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.re('path', '[^A-Za-z0-9+]')
>>> print(filtered)
request(elapsed_ms).re(path, "[^A-Za-z0-9+]")
"""
c = self.copy()
c.filters.append(filters.RE(event_property, value))
return c
|
python
|
def re(self, event_property, value):
"""A regular expression filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.re('path', '[^A-Za-z0-9+]')
>>> print(filtered)
request(elapsed_ms).re(path, "[^A-Za-z0-9+]")
"""
c = self.copy()
c.filters.append(filters.RE(event_property, value))
return c
|
[
"def",
"re",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"RE",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
A regular expression filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.re('path', '[^A-Za-z0-9+]')
>>> print(filtered)
request(elapsed_ms).re(path, "[^A-Za-z0-9+]")
|
[
"A",
"regular",
"expression",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L310-L320
|
240,482
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.startswith
|
def startswith(self, event_property, value):
"""A starts-with filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.startswith('path', '/cube')
>>> print(filtered)
request(elapsed_ms).re(path, "^/cube")
"""
c = self.copy()
c.filters.append(filters.RE(event_property, "^{value}".format(
value=value)))
return c
|
python
|
def startswith(self, event_property, value):
"""A starts-with filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.startswith('path', '/cube')
>>> print(filtered)
request(elapsed_ms).re(path, "^/cube")
"""
c = self.copy()
c.filters.append(filters.RE(event_property, "^{value}".format(
value=value)))
return c
|
[
"def",
"startswith",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"RE",
"(",
"event_property",
",",
"\"^{value}\"",
".",
"format",
"(",
"value",
"=",
"value",
")",
")",
")",
"return",
"c"
] |
A starts-with filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.startswith('path', '/cube')
>>> print(filtered)
request(elapsed_ms).re(path, "^/cube")
|
[
"A",
"starts",
"-",
"with",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L322-L333
|
240,483
|
sbuss/pypercube
|
pypercube/expression.py
|
EventExpression.in_array
|
def in_array(self, event_property, value):
"""An in-array filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.in_array('path', '/event')
>>> print(filtered)
request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"])
>>> filtered = request_time.in_array('path', ['/event', '/'])
>>> print(filtered)
request(elapsed_ms).in(path, ["/event", "/"])
"""
c = self.copy()
c.filters.append(filters.IN(event_property, value))
return c
|
python
|
def in_array(self, event_property, value):
"""An in-array filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.in_array('path', '/event')
>>> print(filtered)
request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"])
>>> filtered = request_time.in_array('path', ['/event', '/'])
>>> print(filtered)
request(elapsed_ms).in(path, ["/event", "/"])
"""
c = self.copy()
c.filters.append(filters.IN(event_property, value))
return c
|
[
"def",
"in_array",
"(",
"self",
",",
"event_property",
",",
"value",
")",
":",
"c",
"=",
"self",
".",
"copy",
"(",
")",
"c",
".",
"filters",
".",
"append",
"(",
"filters",
".",
"IN",
"(",
"event_property",
",",
"value",
")",
")",
"return",
"c"
] |
An in-array filter chain.
>>> request_time = EventExpression('request', 'elapsed_ms')
>>> filtered = request_time.in_array('path', '/event')
>>> print(filtered)
request(elapsed_ms).in(path, ["/", "e", "v", "e", "n", "t"])
>>> filtered = request_time.in_array('path', ['/event', '/'])
>>> print(filtered)
request(elapsed_ms).in(path, ["/event", "/"])
|
[
"An",
"in",
"-",
"array",
"filter",
"chain",
"."
] |
e9d2cca9c004b8bad6d1e0b68b080f887a186a22
|
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/expression.py#L361-L374
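Because every filter method returns a copy of the expression with the new filter appended, the calls compose; an illustrative chain, with the rendering expected from the doctests above:

request_time = EventExpression('request', 'elapsed_ms')
slow_api = request_time.startswith('path', '/api').gt('elapsed_ms', 500)
# roughly: request(elapsed_ms).re(path, "^/api").gt(elapsed_ms, 500)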
|
240,484
|
openp2pdesign/makerlabs
|
makerlabs/timeline.py
|
get_multiple_data
|
def get_multiple_data():
"""Get data from all the platforms listed in makerlabs."""
# Get data from all the mapped platforms
all_labs = {}
all_labs["diybio_org"] = diybio_org.get_labs(format="dict")
all_labs["fablabs_io"] = fablabs_io.get_labs(format="dict")
all_labs["makeinitaly_foundation"] = makeinitaly_foundation.get_labs(
format="dict")
all_labs["hackaday_io"] = hackaday_io.get_labs(format="dict")
all_labs["hackerspaces_org"] = hackerspaces_org.get_labs(format="dict")
all_labs["makery_info"] = makery_info.get_labs(format="dict")
all_labs["nesta"] = nesta.get_labs(format="dict")
# all_labs["techshop_ws"] = techshop_ws.get_labs(format="dict")
return all_labs
|
python
|
def get_multiple_data():
"""Get data from all the platforms listed in makerlabs."""
# Get data from all the mapped platforms
all_labs = {}
all_labs["diybio_org"] = diybio_org.get_labs(format="dict")
all_labs["fablabs_io"] = fablabs_io.get_labs(format="dict")
all_labs["makeinitaly_foundation"] = makeinitaly_foundation.get_labs(
format="dict")
all_labs["hackaday_io"] = hackaday_io.get_labs(format="dict")
all_labs["hackerspaces_org"] = hackerspaces_org.get_labs(format="dict")
all_labs["makery_info"] = makery_info.get_labs(format="dict")
all_labs["nesta"] = nesta.get_labs(format="dict")
# all_labs["techshop_ws"] = techshop_ws.get_labs(format="dict")
return all_labs
|
[
"def",
"get_multiple_data",
"(",
")",
":",
"# Get data from all the mapped platforms",
"all_labs",
"=",
"{",
"}",
"all_labs",
"[",
"\"diybio_org\"",
"]",
"=",
"diybio_org",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"all_labs",
"[",
"\"fablabs_io\"",
"]",
"=",
"fablabs_io",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"all_labs",
"[",
"\"makeinitaly_foundation\"",
"]",
"=",
"makeinitaly_foundation",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"all_labs",
"[",
"\"hackaday_io\"",
"]",
"=",
"hackaday_io",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"all_labs",
"[",
"\"hackerspaces_org\"",
"]",
"=",
"hackerspaces_org",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"all_labs",
"[",
"\"makery_info\"",
"]",
"=",
"makery_info",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"all_labs",
"[",
"\"nesta\"",
"]",
"=",
"nesta",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"# all_labs[\"techshop_ws\"] = techshop_ws.get_labs(format=\"dict\")",
"return",
"all_labs"
] |
Get data from all the platforms listed in makerlabs.
|
[
"Get",
"data",
"from",
"all",
"the",
"platforms",
"listed",
"in",
"makerlabs",
"."
] |
b5838440174f10d370abb671358db9a99d7739fd
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/timeline.py#L24-L39
|
240,485
|
openp2pdesign/makerlabs
|
makerlabs/timeline.py
|
get_timeline
|
def get_timeline(source):
"""Rebuild a timeline of the history of makerlabs."""
# Set up the pandas timeseries dataframe
timeline_format = ["name", "type", "source", "country", "city", "latitude",
"longitude", "website_url", "twitter_url",
"facebook_page_url", "facebook_group_url",
"whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end",
"facebook_start", "facebook_end"]
timeline = pd.DataFrame(timeline_format)
# Getdata from all the mapped platforms
if source.lower() == "diybio.org":
data = diybio_org.get_labs(format="dict")
elif source.lower() == "fablabs_io":
data = fablabs_io.get_labs(format="dict")
elif source.lower() == "makeinitaly_foundation":
data = makeinitaly_foundation.get_labs(format="dict")
elif source.lower() == "hackaday_io":
data = hackaday_io.get_labs(format="dict")
elif source.lower() == "hackerspaces_org":
data = hackerspaces_org.get_labs(format="dict")
elif source.lower() == "makery_info":
data = makery_info.get_labs(format="dict")
elif source.lower() == "nesta":
data = nesta.get_labs(format="dict")
elif source.lower() == "all":
pass
# Fill the dataframe with basic details
for lab in labs_data:
for link in lab.links:
print link
if "twitter" in link:
print link
if "facebook" in link:
print link
lab_dataframe_dict = {"name": lab.name,
"type": lab.lab_type,
"source": lab.source,
"country": lab.country,
"city": lab.city,
"latitude": lab.latitude,
"longitude": lab.longitude,
"website_url": lab.url}
timeline.append(lab_dataframe_dict)
["name", "type", "source", "country", "city", "lat", "long",
"website_url", "twitter_url", "facebook_page_url",
"facebook_group_url", "whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end", "facebook_start",
"facebook_end"]
# Get time data from platforms, whenever possible
# Get domain data (whois)
# Get subdomain data (Internet Archive)
# Get social media data (Twitter)
# Get social media data (Facebook)
return timeline
|
python
|
def get_timeline(source):
"""Rebuild a timeline of the history of makerlabs."""
# Set up the pandas timeseries dataframe
timeline_format = ["name", "type", "source", "country", "city", "latitude",
"longitude", "website_url", "twitter_url",
"facebook_page_url", "facebook_group_url",
"whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end",
"facebook_start", "facebook_end"]
timeline = pd.DataFrame(timeline_format)
# Getdata from all the mapped platforms
if source.lower() == "diybio.org":
data = diybio_org.get_labs(format="dict")
elif source.lower() == "fablabs_io":
data = fablabs_io.get_labs(format="dict")
elif source.lower() == "makeinitaly_foundation":
data = makeinitaly_foundation.get_labs(format="dict")
elif source.lower() == "hackaday_io":
data = hackaday_io.get_labs(format="dict")
elif source.lower() == "hackerspaces_org":
data = hackerspaces_org.get_labs(format="dict")
elif source.lower() == "makery_info":
data = makery_info.get_labs(format="dict")
elif source.lower() == "nesta":
data = nesta.get_labs(format="dict")
elif source.lower() == "all":
pass
# Fill the dataframe with basic details
for lab in labs_data:
for link in lab.links:
print link
if "twitter" in link:
print link
if "facebook" in link:
print link
lab_dataframe_dict = {"name": lab.name,
"type": lab.lab_type,
"source": lab.source,
"country": lab.country,
"city": lab.city,
"latitude": lab.latitude,
"longitude": lab.longitude,
"website_url": lab.url}
timeline.append(lab_dataframe_dict)
["name", "type", "source", "country", "city", "lat", "long",
"website_url", "twitter_url", "facebook_page_url",
"facebook_group_url", "whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end", "facebook_start",
"facebook_end"]
# Get time data from platforms, whenever possible
# Get domain data (whois)
# Get subdomain data (Internet Archive)
# Get social media data (Twitter)
# Get social media data (Facebook)
return timeline
|
[
"def",
"get_timeline",
"(",
"source",
")",
":",
"# Set up the pandas timeseries dataframe",
"timeline_format",
"=",
"[",
"\"name\"",
",",
"\"type\"",
",",
"\"source\"",
",",
"\"country\"",
",",
"\"city\"",
",",
"\"latitude\"",
",",
"\"longitude\"",
",",
"\"website_url\"",
",",
"\"twitter_url\"",
",",
"\"facebook_page_url\"",
",",
"\"facebook_group_url\"",
",",
"\"whois_start\"",
",",
"\"whois_end\"",
",",
"\"wayback_start\"",
",",
"\"wayback_end\"",
",",
"\"twitter_start\"",
",",
"\"twitter_end\"",
",",
"\"facebook_start\"",
",",
"\"facebook_end\"",
"]",
"timeline",
"=",
"pd",
".",
"DataFrame",
"(",
"timeline_format",
")",
"# Getdata from all the mapped platforms",
"if",
"source",
".",
"lower",
"(",
")",
"==",
"\"diybio.org\"",
":",
"data",
"=",
"diybio_org",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"fablabs_io\"",
":",
"data",
"=",
"fablabs_io",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"makeinitaly_foundation\"",
":",
"data",
"=",
"makeinitaly_foundation",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"hackaday_io\"",
":",
"data",
"=",
"hackaday_io",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"hackerspaces_org\"",
":",
"data",
"=",
"hackerspaces_org",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"makery_info\"",
":",
"data",
"=",
"makery_info",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"nesta\"",
":",
"data",
"=",
"nesta",
".",
"get_labs",
"(",
"format",
"=",
"\"dict\"",
")",
"elif",
"source",
".",
"lower",
"(",
")",
"==",
"\"all\"",
":",
"pass",
"# Fill the dataframe with basic details",
"for",
"lab",
"in",
"labs_data",
":",
"for",
"link",
"in",
"lab",
".",
"links",
":",
"print",
"link",
"if",
"\"twitter\"",
"in",
"link",
":",
"print",
"link",
"if",
"\"facebook\"",
"in",
"link",
":",
"print",
"link",
"lab_dataframe_dict",
"=",
"{",
"\"name\"",
":",
"lab",
".",
"name",
",",
"\"type\"",
":",
"lab",
".",
"lab_type",
",",
"\"source\"",
":",
"lab",
".",
"source",
",",
"\"country\"",
":",
"lab",
".",
"country",
",",
"\"city\"",
":",
"lab",
".",
"city",
",",
"\"latitude\"",
":",
"lab",
".",
"latitude",
",",
"\"longitude\"",
":",
"lab",
".",
"longitude",
",",
"\"website_url\"",
":",
"lab",
".",
"url",
"}",
"timeline",
".",
"append",
"(",
"lab_dataframe_dict",
")",
"[",
"\"name\"",
",",
"\"type\"",
",",
"\"source\"",
",",
"\"country\"",
",",
"\"city\"",
",",
"\"lat\"",
",",
"\"long\"",
",",
"\"website_url\"",
",",
"\"twitter_url\"",
",",
"\"facebook_page_url\"",
",",
"\"facebook_group_url\"",
",",
"\"whois_start\"",
",",
"\"whois_end\"",
",",
"\"wayback_start\"",
",",
"\"wayback_end\"",
",",
"\"twitter_start\"",
",",
"\"twitter_end\"",
",",
"\"facebook_start\"",
",",
"\"facebook_end\"",
"]",
"# Get time data from platforms, whenever possible",
"# Get domain data (whois)",
"# Get subdomain data (Internet Archive)",
"# Get social media data (Twitter)",
"# Get social media data (Facebook)",
"return",
"timeline"
] |
Rebuild a timeline of the history of makerlabs.
|
[
"Rebuild",
"a",
"timeline",
"of",
"the",
"history",
"of",
"makerlabs",
"."
] |
b5838440174f10d370abb671358db9a99d7739fd
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/timeline.py#L42-L105
|
240,486
|
edwards-lab/libGWAS
|
libgwas/transposed_pedigree_parser.py
|
Parser.load_genotypes
|
def load_genotypes(self):
"""This really just intializes the file by opening it up. """
if DataParser.compressed_pedigree:
self.genotype_file = gzip.open("%s.gz" % self.tped_file, 'rb')
else:
self.genotype_file = open(self.tped_file)
self.filter_missing()
|
python
|
def load_genotypes(self):
"""This really just intializes the file by opening it up. """
if DataParser.compressed_pedigree:
self.genotype_file = gzip.open("%s.gz" % self.tped_file, 'rb')
else:
self.genotype_file = open(self.tped_file)
self.filter_missing()
|
[
"def",
"load_genotypes",
"(",
"self",
")",
":",
"if",
"DataParser",
".",
"compressed_pedigree",
":",
"self",
".",
"genotype_file",
"=",
"gzip",
".",
"open",
"(",
"\"%s.gz\"",
"%",
"self",
".",
"tped_file",
",",
"'rb'",
")",
"else",
":",
"self",
".",
"genotype_file",
"=",
"open",
"(",
"self",
".",
"tped_file",
")",
"self",
".",
"filter_missing",
"(",
")"
] |
This really just intializes the file by opening it up.
|
[
"This",
"really",
"just",
"intializes",
"the",
"file",
"by",
"opening",
"it",
"up",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/transposed_pedigree_parser.py#L105-L112
|
240,487
|
edwards-lab/libGWAS
|
libgwas/transposed_pedigree_parser.py
|
Parser.process_genotypes
|
def process_genotypes(self, data):
"""Parse pedigree line and remove excluded individuals from geno
Translates alleles into numerical genotypes (0, 1, 2) counting
number of minor alleles.
Throws exceptions if an there are not 2 distinct alleles
"""
# Get a list of uniq entries in the data, except for missing
alleles = list(set(data[4:]) - set(DataParser.missing_representation))
if len(alleles) > 2:
raise TooManyAlleles(chr=self.chr, rsid=self.rsid, alleles=alleles)
# We don't have a way to know this in advance, so we want to just iterate onward
# if we encounter one of these
if len(alleles) == 1:
raise TooFewAlleles(chr=self.chr, rsid=self.rsid, alleles=alleles)
# Strip out any excluded individuals
allelic_data = numpy.ma.MaskedArray(numpy.array(data[4:], dtype="S2"), self.ind_mask).compressed().reshape(-1, 2)
maj_allele_count = numpy.sum(allelic_data==alleles[0])
min_allele_count = numpy.sum(allelic_data==alleles[1])
effect_allele_count = min_allele_count
if min_allele_count > maj_allele_count:
alleles = [alleles[1], alleles[0]]
allele_count = maj_allele_count
maj_allele_count = min_allele_count
min_allele_count = allele_count
#genotypes = []
major_allele = alleles[0]
minor_allele = alleles[1]
# Genotypes represent the sum of minor alleles at each sample
genotype_data = numpy.sum(allelic_data==minor_allele, axis=1)
missing_alleles = allelic_data[:, 0]==DataParser.missing_representation
genotype_data[missing_alleles] = DataParser.missing_storage
hetero_count = numpy.sum(genotype_data==1)
return (genotype_data,
major_allele,
minor_allele,
hetero_count,
maj_allele_count,
min_allele_count,
numpy.sum(missing_alleles),
effect_allele_count)
|
python
|
def process_genotypes(self, data):
"""Parse pedigree line and remove excluded individuals from geno
Translates alleles into numerical genotypes (0, 1, 2) counting
number of minor alleles.
Throws exceptions if an there are not 2 distinct alleles
"""
# Get a list of uniq entries in the data, except for missing
alleles = list(set(data[4:]) - set(DataParser.missing_representation))
if len(alleles) > 2:
raise TooManyAlleles(chr=self.chr, rsid=self.rsid, alleles=alleles)
# We don't have a way to know this in advance, so we want to just iterate onward
# if we encounter one of these
if len(alleles) == 1:
raise TooFewAlleles(chr=self.chr, rsid=self.rsid, alleles=alleles)
# Strip out any excluded individuals
allelic_data = numpy.ma.MaskedArray(numpy.array(data[4:], dtype="S2"), self.ind_mask).compressed().reshape(-1, 2)
maj_allele_count = numpy.sum(allelic_data==alleles[0])
min_allele_count = numpy.sum(allelic_data==alleles[1])
effect_allele_count = min_allele_count
if min_allele_count > maj_allele_count:
alleles = [alleles[1], alleles[0]]
allele_count = maj_allele_count
maj_allele_count = min_allele_count
min_allele_count = allele_count
#genotypes = []
major_allele = alleles[0]
minor_allele = alleles[1]
# Genotypes represent the sum of minor alleles at each sample
genotype_data = numpy.sum(allelic_data==minor_allele, axis=1)
missing_alleles = allelic_data[:, 0]==DataParser.missing_representation
genotype_data[missing_alleles] = DataParser.missing_storage
hetero_count = numpy.sum(genotype_data==1)
return (genotype_data,
major_allele,
minor_allele,
hetero_count,
maj_allele_count,
min_allele_count,
numpy.sum(missing_alleles),
effect_allele_count)
|
[
"def",
"process_genotypes",
"(",
"self",
",",
"data",
")",
":",
"# Get a list of uniq entries in the data, except for missing",
"alleles",
"=",
"list",
"(",
"set",
"(",
"data",
"[",
"4",
":",
"]",
")",
"-",
"set",
"(",
"DataParser",
".",
"missing_representation",
")",
")",
"if",
"len",
"(",
"alleles",
")",
">",
"2",
":",
"raise",
"TooManyAlleles",
"(",
"chr",
"=",
"self",
".",
"chr",
",",
"rsid",
"=",
"self",
".",
"rsid",
",",
"alleles",
"=",
"alleles",
")",
"# We don't have a way to know this in advance, so we want to just iterate onward",
"# if we encounter one of these",
"if",
"len",
"(",
"alleles",
")",
"==",
"1",
":",
"raise",
"TooFewAlleles",
"(",
"chr",
"=",
"self",
".",
"chr",
",",
"rsid",
"=",
"self",
".",
"rsid",
",",
"alleles",
"=",
"alleles",
")",
"# Strip out any excluded individuals",
"allelic_data",
"=",
"numpy",
".",
"ma",
".",
"MaskedArray",
"(",
"numpy",
".",
"array",
"(",
"data",
"[",
"4",
":",
"]",
",",
"dtype",
"=",
"\"S2\"",
")",
",",
"self",
".",
"ind_mask",
")",
".",
"compressed",
"(",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
"maj_allele_count",
"=",
"numpy",
".",
"sum",
"(",
"allelic_data",
"==",
"alleles",
"[",
"0",
"]",
")",
"min_allele_count",
"=",
"numpy",
".",
"sum",
"(",
"allelic_data",
"==",
"alleles",
"[",
"1",
"]",
")",
"effect_allele_count",
"=",
"min_allele_count",
"if",
"min_allele_count",
">",
"maj_allele_count",
":",
"alleles",
"=",
"[",
"alleles",
"[",
"1",
"]",
",",
"alleles",
"[",
"0",
"]",
"]",
"allele_count",
"=",
"maj_allele_count",
"maj_allele_count",
"=",
"min_allele_count",
"min_allele_count",
"=",
"allele_count",
"#genotypes = []",
"major_allele",
"=",
"alleles",
"[",
"0",
"]",
"minor_allele",
"=",
"alleles",
"[",
"1",
"]",
"# Genotypes represent the sum of minor alleles at each sample",
"genotype_data",
"=",
"numpy",
".",
"sum",
"(",
"allelic_data",
"==",
"minor_allele",
",",
"axis",
"=",
"1",
")",
"missing_alleles",
"=",
"allelic_data",
"[",
":",
",",
"0",
"]",
"==",
"DataParser",
".",
"missing_representation",
"genotype_data",
"[",
"missing_alleles",
"]",
"=",
"DataParser",
".",
"missing_storage",
"hetero_count",
"=",
"numpy",
".",
"sum",
"(",
"genotype_data",
"==",
"1",
")",
"return",
"(",
"genotype_data",
",",
"major_allele",
",",
"minor_allele",
",",
"hetero_count",
",",
"maj_allele_count",
",",
"min_allele_count",
",",
"numpy",
".",
"sum",
"(",
"missing_alleles",
")",
",",
"effect_allele_count",
")"
] |
Parse pedigree line and remove excluded individuals from geno
Translates alleles into numerical genotypes (0, 1, 2) counting
number of minor alleles.
Throws exceptions if an there are not 2 distinct alleles
|
[
"Parse",
"pedigree",
"line",
"and",
"remove",
"excluded",
"individuals",
"from",
"geno"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/transposed_pedigree_parser.py#L114-L165
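A tiny illustration (hypothetical data) of the minor-allele counting step used above: comparing each allele pair to the minor allele and summing per row yields the 0/1/2 genotype codes.

import numpy
allelic_data = numpy.array([["A", "A"], ["A", "G"], ["G", "G"]])
genotypes = numpy.sum(allelic_data == "G", axis=1)  # -> array([0, 1, 2])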
|
240,488
|
edwards-lab/libGWAS
|
libgwas/transposed_pedigree_parser.py
|
Parser.filter_missing
|
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be considered"""
missing = None
locus_count = 0
# Filter out individuals according to missingness
self.genotype_file.seek(0)
for genotypes in self.genotype_file:
genotypes = genotypes.split()
chr, rsid, junk, pos = genotypes[0:4]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
locus_count += 1
allelic_data = numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2)
if missing is None:
missing = numpy.zeros(allelic_data.shape[0], dtype='int8')
missing += (numpy.sum(0+(allelic_data==DataParser.missing_representation), axis=1)/2)
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask[:,0] = self.ind_mask[:,0]|dropped_individuals
self.ind_mask[:,1] = self.ind_mask[:,1]|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
max_missing = DataParser.snp_miss_tol * valid_individuals
self.locus_count = 0
# We can't merge these two iterations since we need to know which individuals
# to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
for genotypes in self.genotype_file:
genotypes = genotypes.split()
chr, rsid, junk, pos = genotypes[0:4]
chr = int(chr)
pos = int(pos)
if DataParser.boundary.TestBoundary(chr, pos, rsid):
allelic_data = numpy.ma.MaskedArray(numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2), self.ind_mask).compressed()
missing = numpy.sum(0+(allelic_data==DataParser.missing_representation))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid)
else:
self.locus_count += 1
|
python
|
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be considered"""
missing = None
locus_count = 0
# Filter out individuals according to missingness
self.genotype_file.seek(0)
for genotypes in self.genotype_file:
genotypes = genotypes.split()
chr, rsid, junk, pos = genotypes[0:4]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
locus_count += 1
allelic_data = numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2)
if missing is None:
missing = numpy.zeros(allelic_data.shape[0], dtype='int8')
missing += (numpy.sum(0+(allelic_data==DataParser.missing_representation), axis=1)/2)
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask[:,0] = self.ind_mask[:,0]|dropped_individuals
self.ind_mask[:,1] = self.ind_mask[:,1]|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
max_missing = DataParser.snp_miss_tol * valid_individuals
self.locus_count = 0
# We can't merge these two iterations since we need to know which individuals
# to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
for genotypes in self.genotype_file:
genotypes = genotypes.split()
chr, rsid, junk, pos = genotypes[0:4]
chr = int(chr)
pos = int(pos)
if DataParser.boundary.TestBoundary(chr, pos, rsid):
allelic_data = numpy.ma.MaskedArray(numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2), self.ind_mask).compressed()
missing = numpy.sum(0+(allelic_data==DataParser.missing_representation))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid)
else:
self.locus_count += 1
|
[
"def",
"filter_missing",
"(",
"self",
")",
":",
"missing",
"=",
"None",
"locus_count",
"=",
"0",
"# Filter out individuals according to missingness",
"self",
".",
"genotype_file",
".",
"seek",
"(",
"0",
")",
"for",
"genotypes",
"in",
"self",
".",
"genotype_file",
":",
"genotypes",
"=",
"genotypes",
".",
"split",
"(",
")",
"chr",
",",
"rsid",
",",
"junk",
",",
"pos",
"=",
"genotypes",
"[",
"0",
":",
"4",
"]",
"if",
"DataParser",
".",
"boundary",
".",
"TestBoundary",
"(",
"chr",
",",
"pos",
",",
"rsid",
")",
":",
"locus_count",
"+=",
"1",
"allelic_data",
"=",
"numpy",
".",
"array",
"(",
"genotypes",
"[",
"4",
":",
"]",
",",
"dtype",
"=",
"\"S2\"",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
"if",
"missing",
"is",
"None",
":",
"missing",
"=",
"numpy",
".",
"zeros",
"(",
"allelic_data",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"'int8'",
")",
"missing",
"+=",
"(",
"numpy",
".",
"sum",
"(",
"0",
"+",
"(",
"allelic_data",
"==",
"DataParser",
".",
"missing_representation",
")",
",",
"axis",
"=",
"1",
")",
"/",
"2",
")",
"max_missing",
"=",
"DataParser",
".",
"ind_miss_tol",
"*",
"locus_count",
"dropped_individuals",
"=",
"0",
"+",
"(",
"max_missing",
"<",
"missing",
")",
"self",
".",
"ind_mask",
"[",
":",
",",
"0",
"]",
"=",
"self",
".",
"ind_mask",
"[",
":",
",",
"0",
"]",
"|",
"dropped_individuals",
"self",
".",
"ind_mask",
"[",
":",
",",
"1",
"]",
"=",
"self",
".",
"ind_mask",
"[",
":",
",",
"1",
"]",
"|",
"dropped_individuals",
"valid_individuals",
"=",
"numpy",
".",
"sum",
"(",
"self",
".",
"ind_mask",
"==",
"0",
")",
"max_missing",
"=",
"DataParser",
".",
"snp_miss_tol",
"*",
"valid_individuals",
"self",
".",
"locus_count",
"=",
"0",
"# We can't merge these two iterations since we need to know which individuals",
"# to consider for filtering on MAF",
"dropped_snps",
"=",
"[",
"]",
"self",
".",
"genotype_file",
".",
"seek",
"(",
"0",
")",
"for",
"genotypes",
"in",
"self",
".",
"genotype_file",
":",
"genotypes",
"=",
"genotypes",
".",
"split",
"(",
")",
"chr",
",",
"rsid",
",",
"junk",
",",
"pos",
"=",
"genotypes",
"[",
"0",
":",
"4",
"]",
"chr",
"=",
"int",
"(",
"chr",
")",
"pos",
"=",
"int",
"(",
"pos",
")",
"if",
"DataParser",
".",
"boundary",
".",
"TestBoundary",
"(",
"chr",
",",
"pos",
",",
"rsid",
")",
":",
"allelic_data",
"=",
"numpy",
".",
"ma",
".",
"MaskedArray",
"(",
"numpy",
".",
"array",
"(",
"genotypes",
"[",
"4",
":",
"]",
",",
"dtype",
"=",
"\"S2\"",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
",",
"self",
".",
"ind_mask",
")",
".",
"compressed",
"(",
")",
"missing",
"=",
"numpy",
".",
"sum",
"(",
"0",
"+",
"(",
"allelic_data",
"==",
"DataParser",
".",
"missing_representation",
")",
")",
"if",
"missing",
">",
"max_missing",
":",
"DataParser",
".",
"boundary",
".",
"dropped_snps",
"[",
"int",
"(",
"chr",
")",
"]",
".",
"add",
"(",
"int",
"(",
"pos",
")",
")",
"dropped_snps",
".",
"append",
"(",
"rsid",
")",
"else",
":",
"self",
".",
"locus_count",
"+=",
"1"
] |
Filter out individuals and SNPs that have too many missing to be considered
|
[
"Filter",
"out",
"individuals",
"and",
"SNPs",
"that",
"have",
"too",
"many",
"missing",
"to",
"be",
"considered"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/transposed_pedigree_parser.py#L167-L211
|
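filter_missing above makes two passes: the first drops individuals whose missing-call count exceeds ind_miss_tol * locus_count, the second drops SNPs whose missing count among the surviving individuals exceeds snp_miss_tol * valid_individuals. The threshold arithmetic, with tolerance and count values invented purely for illustration, looks like this (the record itself tallies per allele call, two entries per person, which keeps the same ratio; the sketch counts per individual for brevity):

# Illustrative numbers only; the tolerances are not libGWAS defaults.
ind_miss_tol, snp_miss_tol = 0.10, 0.05
locus_count = 2000                          # loci seen in the first pass
missing_per_individual = [15, 250, 90]      # missing loci per individual

max_missing_ind = ind_miss_tol * locus_count           # 200.0
kept = [m for m in missing_per_individual if m <= max_missing_ind]
valid_individuals = len(kept)                           # the 250-missing case is dropped
max_missing_snp = snp_miss_tol * valid_individuals      # per-SNP ceiling in pass two
print(max_missing_ind, valid_individuals, max_missing_snp)   # 200.0 2 0.1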
240,489
|
edwards-lab/libGWAS
|
libgwas/transposed_pedigree_parser.py
|
Parser.populate_iteration
|
def populate_iteration(self, iteration):
"""Pour the current data into the iteration object"""
cur_idx = iteration.cur_idx
genotypes = self.genotype_file.next().split()
iteration.chr, iteration.rsid, junk, iteration.pos = genotypes[0:4]
iteration.chr = int(iteration.chr)
iteration.pos = int(iteration.pos)
if DataParser.boundary.TestBoundary(iteration.chr, iteration.pos, iteration.rsid):
try:
[iteration.genotype_data,
iteration.major_allele,
iteration.minor_allele,
iteration.hetero_count,
iteration.maj_allele_count,
iteration.min_allele_count,
iteration.missing_allele_count,
iteration.allele_count2] = self.process_genotypes(genotypes)
return iteration.maf >= DataParser.min_maf and iteration.maf <= DataParser.max_maf
except TooFewAlleles:
print "\n\n\nSkipping %s:%s %s %s" % (iteration.chr, iteration.pos, iteration.rsid, cur_idx)
return False
|
python
|
def populate_iteration(self, iteration):
"""Pour the current data into the iteration object"""
cur_idx = iteration.cur_idx
genotypes = self.genotype_file.next().split()
iteration.chr, iteration.rsid, junk, iteration.pos = genotypes[0:4]
iteration.chr = int(iteration.chr)
iteration.pos = int(iteration.pos)
if DataParser.boundary.TestBoundary(iteration.chr, iteration.pos, iteration.rsid):
try:
[iteration.genotype_data,
iteration.major_allele,
iteration.minor_allele,
iteration.hetero_count,
iteration.maj_allele_count,
iteration.min_allele_count,
iteration.missing_allele_count,
iteration.allele_count2] = self.process_genotypes(genotypes)
return iteration.maf >= DataParser.min_maf and iteration.maf <= DataParser.max_maf
except TooFewAlleles:
print "\n\n\nSkipping %s:%s %s %s" % (iteration.chr, iteration.pos, iteration.rsid, cur_idx)
return False
|
[
"def",
"populate_iteration",
"(",
"self",
",",
"iteration",
")",
":",
"cur_idx",
"=",
"iteration",
".",
"cur_idx",
"genotypes",
"=",
"self",
".",
"genotype_file",
".",
"next",
"(",
")",
".",
"split",
"(",
")",
"iteration",
".",
"chr",
",",
"iteration",
".",
"rsid",
",",
"junk",
",",
"iteration",
".",
"pos",
"=",
"genotypes",
"[",
"0",
":",
"4",
"]",
"iteration",
".",
"chr",
"=",
"int",
"(",
"iteration",
".",
"chr",
")",
"iteration",
".",
"pos",
"=",
"int",
"(",
"iteration",
".",
"pos",
")",
"if",
"DataParser",
".",
"boundary",
".",
"TestBoundary",
"(",
"iteration",
".",
"chr",
",",
"iteration",
".",
"pos",
",",
"iteration",
".",
"rsid",
")",
":",
"try",
":",
"[",
"iteration",
".",
"genotype_data",
",",
"iteration",
".",
"major_allele",
",",
"iteration",
".",
"minor_allele",
",",
"iteration",
".",
"hetero_count",
",",
"iteration",
".",
"maj_allele_count",
",",
"iteration",
".",
"min_allele_count",
",",
"iteration",
".",
"missing_allele_count",
",",
"iteration",
".",
"allele_count2",
"]",
"=",
"self",
".",
"process_genotypes",
"(",
"genotypes",
")",
"return",
"iteration",
".",
"maf",
">=",
"DataParser",
".",
"min_maf",
"and",
"iteration",
".",
"maf",
"<=",
"DataParser",
".",
"max_maf",
"except",
"TooFewAlleles",
":",
"print",
"\"\\n\\n\\nSkipping %s:%s %s %s\"",
"%",
"(",
"iteration",
".",
"chr",
",",
"iteration",
".",
"pos",
",",
"iteration",
".",
"rsid",
",",
"cur_idx",
")",
"return",
"False"
] |
Pour the current data into the iteration object
|
[
"Pour",
"the",
"current",
"data",
"into",
"the",
"iteration",
"object"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/transposed_pedigree_parser.py#L215-L238
|
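populate_iteration fills one locus per call and reports, via its return value, whether the locus both passes the boundary test and lands inside the MAF window [min_maf, max_maf]. The acceptance test on the allele counts can be sketched as below; the function name and the default bounds are hypothetical, only the inequality mirrors the record.

# Hypothetical sketch of the MAF-window acceptance check; bounds are made up.
def passes_maf_window(min_allele_count, maj_allele_count,
                      min_maf=0.01, max_maf=0.5):
    total = min_allele_count + maj_allele_count
    if total == 0:
        return False
    maf = min_allele_count / float(total)    # minor allele frequency
    return min_maf <= maf <= max_maf

print(passes_maf_window(5, 995))    # maf 0.005 -> False
print(passes_maf_window(50, 950))   # maf 0.05  -> True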
240,490
|
openp2pdesign/makerlabs
|
makerlabs/nesta.py
|
get_labs
|
def get_labs(format):
"""Gets current UK Makerspaces data as listed by NESTA."""
ukmakerspaces_data = data_from_nesta()
ukmakerspaces = {}
# Iterate over csv rows
for index, row in ukmakerspaces_data.iterrows():
current_lab = UKMakerspace()
current_lab.address_1 = row["Address"].replace("\r", " ")
current_lab.address_2 = row["Region"].replace("\r", " ") + " - " + row["Area"].replace("\r", " ")
current_lab.city = ""
current_lab.county = ""
current_lab.email = row["Email address"]
current_lab.latitude = ""
current_lab.longitude = ""
current_lab.links = ""
current_lab.name = row["Name of makerspace"]
current_lab.phone = row["Phone number"]
current_lab.postal_code = row["Postcode"]
current_lab.url = row["Website / URL"]
# Add the lab, with a slug from the name
ukmakerspaces[current_lab.name] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in ukmakerspaces:
output[j] = ukmakerspaces[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in ukmakerspaces:
single = ukmakerspaces[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in ukmakerspaces:
output[j] = ukmakerspaces[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = ukmakerspaces
# Default: return an oject
else:
output = ukmakerspaces
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
python
|
def get_labs(format):
"""Gets current UK Makerspaces data as listed by NESTA."""
ukmakerspaces_data = data_from_nesta()
ukmakerspaces = {}
# Iterate over csv rows
for index, row in ukmakerspaces_data.iterrows():
current_lab = UKMakerspace()
current_lab.address_1 = row["Address"].replace("\r", " ")
current_lab.address_2 = row["Region"].replace("\r", " ") + " - " + row["Area"].replace("\r", " ")
current_lab.city = ""
current_lab.county = ""
current_lab.email = row["Email address"]
current_lab.latitude = ""
current_lab.longitude = ""
current_lab.links = ""
current_lab.name = row["Name of makerspace"]
current_lab.phone = row["Phone number"]
current_lab.postal_code = row["Postcode"]
current_lab.url = row["Website / URL"]
# Add the lab, with a slug from the name
ukmakerspaces[current_lab.name] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in ukmakerspaces:
output[j] = ukmakerspaces[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in ukmakerspaces:
single = ukmakerspaces[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in ukmakerspaces:
output[j] = ukmakerspaces[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = ukmakerspaces
# Default: return an oject
else:
output = ukmakerspaces
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
[
"def",
"get_labs",
"(",
"format",
")",
":",
"ukmakerspaces_data",
"=",
"data_from_nesta",
"(",
")",
"ukmakerspaces",
"=",
"{",
"}",
"# Iterate over csv rows",
"for",
"index",
",",
"row",
"in",
"ukmakerspaces_data",
".",
"iterrows",
"(",
")",
":",
"current_lab",
"=",
"UKMakerspace",
"(",
")",
"current_lab",
".",
"address_1",
"=",
"row",
"[",
"\"Address\"",
"]",
".",
"replace",
"(",
"\"\\r\"",
",",
"\" \"",
")",
"current_lab",
".",
"address_2",
"=",
"row",
"[",
"\"Region\"",
"]",
".",
"replace",
"(",
"\"\\r\"",
",",
"\" \"",
")",
"+",
"\" - \"",
"+",
"row",
"[",
"\"Area\"",
"]",
".",
"replace",
"(",
"\"\\r\"",
",",
"\" \"",
")",
"current_lab",
".",
"city",
"=",
"\"\"",
"current_lab",
".",
"county",
"=",
"\"\"",
"current_lab",
".",
"email",
"=",
"row",
"[",
"\"Email address\"",
"]",
"current_lab",
".",
"latitude",
"=",
"\"\"",
"current_lab",
".",
"longitude",
"=",
"\"\"",
"current_lab",
".",
"links",
"=",
"\"\"",
"current_lab",
".",
"name",
"=",
"row",
"[",
"\"Name of makerspace\"",
"]",
"current_lab",
".",
"phone",
"=",
"row",
"[",
"\"Phone number\"",
"]",
"current_lab",
".",
"postal_code",
"=",
"row",
"[",
"\"Postcode\"",
"]",
"current_lab",
".",
"url",
"=",
"row",
"[",
"\"Website / URL\"",
"]",
"# Add the lab, with a slug from the name",
"ukmakerspaces",
"[",
"current_lab",
".",
"name",
"]",
"=",
"current_lab",
"# Return a dictiornary / json",
"if",
"format",
".",
"lower",
"(",
")",
"==",
"\"dict\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"json\"",
":",
"output",
"=",
"{",
"}",
"for",
"j",
"in",
"ukmakerspaces",
":",
"output",
"[",
"j",
"]",
"=",
"ukmakerspaces",
"[",
"j",
"]",
".",
"__dict__",
"# Return a geojson",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"geojson\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"geo\"",
":",
"labs_list",
"=",
"[",
"]",
"for",
"l",
"in",
"ukmakerspaces",
":",
"single",
"=",
"ukmakerspaces",
"[",
"l",
"]",
".",
"__dict__",
"single_lab",
"=",
"Feature",
"(",
"type",
"=",
"\"Feature\"",
",",
"geometry",
"=",
"Point",
"(",
"(",
"single",
"[",
"\"latitude\"",
"]",
",",
"single",
"[",
"\"longitude\"",
"]",
")",
")",
",",
"properties",
"=",
"single",
")",
"labs_list",
".",
"append",
"(",
"single_lab",
")",
"output",
"=",
"dumps",
"(",
"FeatureCollection",
"(",
"labs_list",
")",
")",
"# Return a Pandas DataFrame",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"pandas\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"dataframe\"",
":",
"output",
"=",
"{",
"}",
"for",
"j",
"in",
"ukmakerspaces",
":",
"output",
"[",
"j",
"]",
"=",
"ukmakerspaces",
"[",
"j",
"]",
".",
"__dict__",
"# Transform the dict into a Pandas DataFrame",
"output",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"output",
")",
"output",
"=",
"output",
".",
"transpose",
"(",
")",
"# Return an object",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"object\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"obj\"",
":",
"output",
"=",
"ukmakerspaces",
"# Default: return an oject",
"else",
":",
"output",
"=",
"ukmakerspaces",
"# Return a proper json",
"if",
"format",
".",
"lower",
"(",
")",
"==",
"\"json\"",
":",
"output",
"=",
"json",
".",
"dumps",
"(",
"output",
")",
"return",
"output"
] |
Gets current UK Makerspaces data as listed by NESTA.
|
[
"Gets",
"current",
"UK",
"Makerspaces",
"data",
"as",
"listed",
"by",
"NESTA",
"."
] |
b5838440174f10d370abb671358db9a99d7739fd
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/nesta.py#L47-L105
|
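get_labs dispatches on a lowercased format string, so the caller picks the output shape at call time. A hedged usage sketch follows; the import path is inferred from the record's makerlabs/nesta.py path and may differ in practice.

# Hypothetical usage of the format dispatch shown above.
from makerlabs import nesta

labs_dict = nesta.get_labs(format="dict")      # dict of lab attribute dicts
labs_json = nesta.get_labs(format="json")      # same data as a JSON string
labs_geo = nesta.get_labs(format="geojson")    # GeoJSON FeatureCollection string
labs_df = nesta.get_labs(format="pandas")      # pandas DataFrame, one row per lab
labs_obj = nesta.get_labs(format="object")     # dict of UKMakerspace instances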
240,491
|
rmorshea/dstruct
|
dstruct/dstruct.py
|
BaseDescriptor.init_self
|
def init_self(self, cls, name):
"""Initialize this descriptor instance
Parameters
----------
cls : class
The class which owns this descriptor
name : str
The attribute name of this descriptor
"""
# the class the descriptor is defined on
self.this_class = cls
# the attribute name of this descriptor
self.this_name = name
|
python
|
def init_self(self, cls, name):
"""Initialize this descriptor instance
Parameters
----------
cls : class
The class which owns this descriptor
name : str
The attribute name of this descriptor
"""
# the class the descriptor is defined on
self.this_class = cls
# the attribute name of this descriptor
self.this_name = name
|
[
"def",
"init_self",
"(",
"self",
",",
"cls",
",",
"name",
")",
":",
"# the class the descriptor is defined on",
"self",
".",
"this_class",
"=",
"cls",
"# the attribute name of this descriptor",
"self",
".",
"this_name",
"=",
"name"
] |
Initialize this descriptor instance
Parameters
----------
cls : class
The class which owns this descriptor
name : str
The attribute name of this descriptor
|
[
"Initialize",
"this",
"descriptor",
"instance"
] |
c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c
|
https://github.com/rmorshea/dstruct/blob/c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c/dstruct/dstruct.py#L31-L44
|
240,492
|
rmorshea/dstruct
|
dstruct/dstruct.py
|
DataStruct.add_fields
|
def add_fields(self, **fields):
"""Add new data fields to this struct instance"""
self.__class__ = type(self.__class__.__name__,
(self.__class__,), fields)
for k, v in fields.items():
v.init_inst(self)
|
python
|
def add_fields(self, **fields):
"""Add new data fields to this struct instance"""
self.__class__ = type(self.__class__.__name__,
(self.__class__,), fields)
for k, v in fields.items():
v.init_inst(self)
|
[
"def",
"add_fields",
"(",
"self",
",",
"*",
"*",
"fields",
")",
":",
"self",
".",
"__class__",
"=",
"type",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"(",
"self",
".",
"__class__",
",",
")",
",",
"fields",
")",
"for",
"k",
",",
"v",
"in",
"fields",
".",
"items",
"(",
")",
":",
"v",
".",
"init_inst",
"(",
"self",
")"
] |
Add new data fields to this struct instance
|
[
"Add",
"new",
"data",
"fields",
"to",
"this",
"struct",
"instance"
] |
c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c
|
https://github.com/rmorshea/dstruct/blob/c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c/dstruct/dstruct.py#L357-L362
|
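add_fields mutates only the one instance by swapping its __class__ for a freshly built subclass carrying the new descriptors, so other instances of the base class stay untouched. The snippet below illustrates that idiom with a toy descriptor instead of dstruct's DataField machinery.

# Minimal illustration of per-instance class swapping, independent of dstruct.
class Upper:
    """Toy descriptor that upper-cases whatever is read from the instance."""
    def __set_name__(self, owner, name):
        self.name = "_" + name
    def __get__(self, obj, objtype=None):
        return getattr(obj, self.name, "").upper()
    def __set__(self, obj, value):
        setattr(obj, self.name, value)

class Record:
    pass

r = Record()
# Swap in a one-off subclass that has the extra descriptor, as add_fields does.
r.__class__ = type(Record.__name__, (Record,), {"title": Upper()})
r.title = "hello"
print(r.title)                       # HELLO
print("title" in Record.__dict__)    # False: other Record instances unaffected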
240,493
|
rmorshea/dstruct
|
dstruct/dstruct.py
|
DataStruct.del_fields
|
def del_fields(self, *names):
"""Delete data fields from this struct instance"""
cls = type(self)
self.__class__ = cls
for n in names:
# don't raise error if a field is absent
if isinstance(getattr(cls, n, None), DataField):
if n in self._field_values:
del self._field_values[n]
delattr(cls, n)
|
python
|
def del_fields(self, *names):
"""Delete data fields from this struct instance"""
cls = type(self)
self.__class__ = cls
for n in names:
# don't raise error if a field is absent
if isinstance(getattr(cls, n, None), DataField):
if n in self._field_values:
del self._field_values[n]
delattr(cls, n)
|
[
"def",
"del_fields",
"(",
"self",
",",
"*",
"names",
")",
":",
"cls",
"=",
"type",
"(",
"self",
")",
"self",
".",
"__class__",
"=",
"cls",
"for",
"n",
"in",
"names",
":",
"# don't raise error if a field is absent",
"if",
"isinstance",
"(",
"getattr",
"(",
"cls",
",",
"n",
",",
"None",
")",
",",
"DataField",
")",
":",
"if",
"n",
"in",
"self",
".",
"_field_values",
":",
"del",
"self",
".",
"_field_values",
"[",
"n",
"]",
"delattr",
"(",
"cls",
",",
"n",
")"
] |
Delete data fields from this struct instance
|
[
"Delete",
"data",
"fields",
"from",
"this",
"struct",
"instance"
] |
c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c
|
https://github.com/rmorshea/dstruct/blob/c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c/dstruct/dstruct.py#L364-L373
|
240,494
|
rmorshea/dstruct
|
dstruct/dstruct.py
|
DataStruct.set_field
|
def set_field(self, name, value):
"""Forcibly sets field values without parsing"""
f = getattr(self, name, None)
if isinstance(f, DataField):
f.set(self, value)
else:
raise FieldError("No field named '%s'" % name)
|
python
|
def set_field(self, name, value):
"""Forcibly sets field values without parsing"""
f = getattr(self, name, None)
if isinstance(f, DataField):
f.set(self, value)
else:
raise FieldError("No field named '%s'" % name)
|
[
"def",
"set_field",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"f",
"=",
"getattr",
"(",
"self",
",",
"name",
",",
"None",
")",
"if",
"isinstance",
"(",
"f",
",",
"DataField",
")",
":",
"f",
".",
"set",
"(",
"self",
",",
"value",
")",
"else",
":",
"raise",
"FieldError",
"(",
"\"No field named '%s'\"",
"%",
"name",
")"
] |
Forcibly sets field values without parsing
|
[
"Forcibly",
"sets",
"field",
"values",
"without",
"parsing"
] |
c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c
|
https://github.com/rmorshea/dstruct/blob/c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c/dstruct/dstruct.py#L375-L381
|
240,495
|
DallasMorningNews/django-datafreezer
|
datafreezer/views_compat.py
|
LoginView.get_success_url
|
def get_success_url(self):
"""Ensure the user-originating redirection URL is safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
url_is_safe = is_safe_url(
url=redirect_to,
# allowed_hosts=self.get_success_url_allowed_hosts(),
# require_https=self.request.is_secure(),
)
if not url_is_safe:
return resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
|
python
|
def get_success_url(self):
"""Ensure the user-originating redirection URL is safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
url_is_safe = is_safe_url(
url=redirect_to,
# allowed_hosts=self.get_success_url_allowed_hosts(),
# require_https=self.request.is_secure(),
)
if not url_is_safe:
return resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
|
[
"def",
"get_success_url",
"(",
"self",
")",
":",
"redirect_to",
"=",
"self",
".",
"request",
".",
"POST",
".",
"get",
"(",
"self",
".",
"redirect_field_name",
",",
"self",
".",
"request",
".",
"GET",
".",
"get",
"(",
"self",
".",
"redirect_field_name",
",",
"''",
")",
")",
"url_is_safe",
"=",
"is_safe_url",
"(",
"url",
"=",
"redirect_to",
",",
"# allowed_hosts=self.get_success_url_allowed_hosts(),",
"# require_https=self.request.is_secure(),",
")",
"if",
"not",
"url_is_safe",
":",
"return",
"resolve_url",
"(",
"settings",
".",
"LOGIN_REDIRECT_URL",
")",
"return",
"redirect_to"
] |
Ensure the user-originating redirection URL is safe.
|
[
"Ensure",
"the",
"user",
"-",
"originating",
"redirection",
"URL",
"is",
"safe",
"."
] |
982dcf2015c80a280f1a093e32977cb71d4ea7aa
|
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views_compat.py#L61-L74
|
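get_success_url implements a common safe-redirect pattern: read the target from POST or GET, validate it, and fall back to LOGIN_REDIRECT_URL otherwise. The check below is a simplified stand-in written without Django (is_safe_url's keyword arguments changed between Django versions, which is why two of them are commented out in the record); the fallback URL and allowed host are placeholders.

# Simplified stand-in for the safe-redirect check; not Django's is_safe_url.
from urllib.parse import urlparse

LOGIN_REDIRECT_URL = "/datasets/"    # assumed fallback, illustration only

def safe_redirect(target, allowed_hosts=("example.com",)):
    if not target:
        return LOGIN_REDIRECT_URL
    parts = urlparse(target)
    # Accept relative URLs and http(s) URLs on whitelisted hosts only.
    if parts.netloc and parts.netloc not in allowed_hosts:
        return LOGIN_REDIRECT_URL
    if parts.scheme and parts.scheme not in ("http", "https"):
        return LOGIN_REDIRECT_URL
    return target

print(safe_redirect("/dashboard/"))              # /dashboard/
print(safe_redirect("https://evil.test/phish"))  # /datasets/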
240,496
|
DallasMorningNews/django-datafreezer
|
datafreezer/views_compat.py
|
LoginView.form_valid
|
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
|
python
|
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
|
[
"def",
"form_valid",
"(",
"self",
",",
"form",
")",
":",
"auth_login",
"(",
"self",
".",
"request",
",",
"form",
".",
"get_user",
"(",
")",
")",
"return",
"HttpResponseRedirect",
"(",
"self",
".",
"get_success_url",
"(",
")",
")"
] |
Security check complete. Log the user in.
|
[
"Security",
"check",
"complete",
".",
"Log",
"the",
"user",
"in",
"."
] |
982dcf2015c80a280f1a093e32977cb71d4ea7aa
|
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views_compat.py#L79-L82
|
240,497
|
pignacio/issue2branch
|
issue2branch/trackers/base.py
|
IssueTracker._request
|
def _request(self, method, url, **kwargs):
''' Wrap `utils.requests.request` adding user and password. '''
self._ask_for_password()
return request(method, url, user=self._user, password=self._password,
**kwargs)
|
python
|
def _request(self, method, url, **kwargs):
''' Wrap `utils.requests.request` adding user and password. '''
self._ask_for_password()
return request(method, url, user=self._user, password=self._password,
**kwargs)
|
[
"def",
"_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_ask_for_password",
"(",
")",
"return",
"request",
"(",
"method",
",",
"url",
",",
"user",
"=",
"self",
".",
"_user",
",",
"password",
"=",
"self",
".",
"_password",
",",
"*",
"*",
"kwargs",
")"
] |
Wrap `utils.requests.request` adding user and password.
|
[
"Wrap",
"utils",
".",
"requests",
".",
"request",
"adding",
"user",
"and",
"password",
"."
] |
f1a581d93df017f7ec4ad2d432b254a4a3b8c131
|
https://github.com/pignacio/issue2branch/blob/f1a581d93df017f7ec4ad2d432b254a4a3b8c131/issue2branch/trackers/base.py#L38-L42
|
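_request delegates to a project-level request helper, forwarding the tracker credentials. With the plain requests library the equivalent pattern uses the auth tuple; the endpoint, credentials and JSON handling below are placeholders, not issue2branch's actual helper.

# Equivalent pattern with the requests library; values are placeholders.
import requests

def tracker_request(method, url, user=None, password=None, **kwargs):
    auth = (user, password) if user is not None else None
    response = requests.request(method, url, auth=auth, **kwargs)
    response.raise_for_status()
    return response.json()

# issues = tracker_request("GET", "https://tracker.example.com/api/issues",
#                          user="alice", password="secret")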
240,498
|
inveniosoftware-contrib/record-recommender
|
record_recommender/profiles.py
|
Profiles.create
|
def create(self, weeks):
"""Create the user and ip profiles for the given weeks."""
user_pageviews = self.create_profiles('Pageviews', weeks)
user_downloads = self.create_profiles('Downloads', weeks)
self._export_profiles('Profiles', user_pageviews, user_downloads)
user_pageviews = self.create_profiles('Pageviews_IP', weeks, True)
user_downloads = self.create_profiles('Downloads_IP', weeks, True)
self._export_profiles('Profiles_IP', user_pageviews, user_downloads,
ip_user=True)
|
python
|
def create(self, weeks):
"""Create the user and ip profiles for the given weeks."""
user_pageviews = self.create_profiles('Pageviews', weeks)
user_downloads = self.create_profiles('Downloads', weeks)
self._export_profiles('Profiles', user_pageviews, user_downloads)
user_pageviews = self.create_profiles('Pageviews_IP', weeks, True)
user_downloads = self.create_profiles('Downloads_IP', weeks, True)
self._export_profiles('Profiles_IP', user_pageviews, user_downloads,
ip_user=True)
|
[
"def",
"create",
"(",
"self",
",",
"weeks",
")",
":",
"user_pageviews",
"=",
"self",
".",
"create_profiles",
"(",
"'Pageviews'",
",",
"weeks",
")",
"user_downloads",
"=",
"self",
".",
"create_profiles",
"(",
"'Downloads'",
",",
"weeks",
")",
"self",
".",
"_export_profiles",
"(",
"'Profiles'",
",",
"user_pageviews",
",",
"user_downloads",
")",
"user_pageviews",
"=",
"self",
".",
"create_profiles",
"(",
"'Pageviews_IP'",
",",
"weeks",
",",
"True",
")",
"user_downloads",
"=",
"self",
".",
"create_profiles",
"(",
"'Downloads_IP'",
",",
"weeks",
",",
"True",
")",
"self",
".",
"_export_profiles",
"(",
"'Profiles_IP'",
",",
"user_pageviews",
",",
"user_downloads",
",",
"ip_user",
"=",
"True",
")"
] |
Create the user and ip profiles for the given weeks.
|
[
"Create",
"the",
"user",
"and",
"ip",
"profiles",
"for",
"the",
"given",
"weeks",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L54-L65
|
240,499
|
inveniosoftware-contrib/record-recommender
|
record_recommender/profiles.py
|
Profiles._export_profiles
|
def _export_profiles(self, profile_name, user_pageviews, user_downloads,
ip_user=False):
"""Filter and export the user profiles."""
views_min = self.config.get('user_views_min')
views_max = self.config.get('user_views_max')
ip_user_id = 500000000000
add_user_id = 100000000000
stat_records = 0
with self.storage.get_user_profiles(profile_name) as store:
store.clear()
for user in user_pageviews:
# Only users with unique pageviews.
unique_views = len(set(user_pageviews[user]))
if views_max > unique_views >= views_min:
nodes, weight = self._calculate_user_record_weights(
record_list=user_pageviews[user],
download_list=user_downloads.get(user))
if ip_user:
store.add_user(ip_user_id, nodes, weight)
ip_user_id += 1
else:
user = str(add_user_id + int(user))
store.add_user(user, nodes, weight)
self.stat_long['User_num_records'].append(len(nodes))
stat_records += len(nodes)
elif unique_views >= views_min:
# TODO: Add stat for to many views.
print("Drop user {} with {} views".format(user,
unique_views))
self.stat['user_profiles'] = len(self.stat_long.get(
'User_num_records'))
self.stat['user_profiles_records'] = stat_records
print("Stats: {}".format(self.stat))
|
python
|
def _export_profiles(self, profile_name, user_pageviews, user_downloads,
ip_user=False):
"""Filter and export the user profiles."""
views_min = self.config.get('user_views_min')
views_max = self.config.get('user_views_max')
ip_user_id = 500000000000
add_user_id = 100000000000
stat_records = 0
with self.storage.get_user_profiles(profile_name) as store:
store.clear()
for user in user_pageviews:
# Only users with unique pageviews.
unique_views = len(set(user_pageviews[user]))
if views_max > unique_views >= views_min:
nodes, weight = self._calculate_user_record_weights(
record_list=user_pageviews[user],
download_list=user_downloads.get(user))
if ip_user:
store.add_user(ip_user_id, nodes, weight)
ip_user_id += 1
else:
user = str(add_user_id + int(user))
store.add_user(user, nodes, weight)
self.stat_long['User_num_records'].append(len(nodes))
stat_records += len(nodes)
elif unique_views >= views_min:
# TODO: Add stat for to many views.
print("Drop user {} with {} views".format(user,
unique_views))
self.stat['user_profiles'] = len(self.stat_long.get(
'User_num_records'))
self.stat['user_profiles_records'] = stat_records
print("Stats: {}".format(self.stat))
|
[
"def",
"_export_profiles",
"(",
"self",
",",
"profile_name",
",",
"user_pageviews",
",",
"user_downloads",
",",
"ip_user",
"=",
"False",
")",
":",
"views_min",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'user_views_min'",
")",
"views_max",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'user_views_max'",
")",
"ip_user_id",
"=",
"500000000000",
"add_user_id",
"=",
"100000000000",
"stat_records",
"=",
"0",
"with",
"self",
".",
"storage",
".",
"get_user_profiles",
"(",
"profile_name",
")",
"as",
"store",
":",
"store",
".",
"clear",
"(",
")",
"for",
"user",
"in",
"user_pageviews",
":",
"# Only users with unique pageviews.",
"unique_views",
"=",
"len",
"(",
"set",
"(",
"user_pageviews",
"[",
"user",
"]",
")",
")",
"if",
"views_max",
">",
"unique_views",
">=",
"views_min",
":",
"nodes",
",",
"weight",
"=",
"self",
".",
"_calculate_user_record_weights",
"(",
"record_list",
"=",
"user_pageviews",
"[",
"user",
"]",
",",
"download_list",
"=",
"user_downloads",
".",
"get",
"(",
"user",
")",
")",
"if",
"ip_user",
":",
"store",
".",
"add_user",
"(",
"ip_user_id",
",",
"nodes",
",",
"weight",
")",
"ip_user_id",
"+=",
"1",
"else",
":",
"user",
"=",
"str",
"(",
"add_user_id",
"+",
"int",
"(",
"user",
")",
")",
"store",
".",
"add_user",
"(",
"user",
",",
"nodes",
",",
"weight",
")",
"self",
".",
"stat_long",
"[",
"'User_num_records'",
"]",
".",
"append",
"(",
"len",
"(",
"nodes",
")",
")",
"stat_records",
"+=",
"len",
"(",
"nodes",
")",
"elif",
"unique_views",
">=",
"views_min",
":",
"# TODO: Add stat for to many views.",
"print",
"(",
"\"Drop user {} with {} views\"",
".",
"format",
"(",
"user",
",",
"unique_views",
")",
")",
"self",
".",
"stat",
"[",
"'user_profiles'",
"]",
"=",
"len",
"(",
"self",
".",
"stat_long",
".",
"get",
"(",
"'User_num_records'",
")",
")",
"self",
".",
"stat",
"[",
"'user_profiles_records'",
"]",
"=",
"stat_records",
"print",
"(",
"\"Stats: {}\"",
".",
"format",
"(",
"self",
".",
"stat",
")",
")"
] |
Filter and export the user profiles.
|
[
"Filter",
"and",
"export",
"the",
"user",
"profiles",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/profiles.py#L67-L101
|
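Two details in _export_profiles are easy to miss: a user is kept only if the number of distinct records they viewed falls in [user_views_min, user_views_max), and stored identifiers are offset (100000000000 added to account IDs, IP-derived profiles numbered from 500000000000) so the two namespaces cannot collide. A small sketch of that filter, with invented bounds:

# Illustration of the unique-view filter and the ID offset; bounds are made up.
ADD_USER_ID = 100000000000
user_views_min, user_views_max = 3, 100

user_pageviews = {"42": [7, 7, 9, 11], "43": [5], "44": list(range(200))}
kept = {}
for user, records in user_pageviews.items():
    unique_views = len(set(records))
    if user_views_min <= unique_views < user_views_max:
        kept[str(ADD_USER_ID + int(user))] = sorted(set(records))

print(kept)   # {'100000000042': [7, 9, 11]}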