| column | type | range |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | length 7 to 55 |
| path | string | length 4 to 127 |
| func_name | string | length 1 to 88 |
| original_string | string | length 75 to 19.8k |
| language | string | 1 class (python) |
| code | string | length 75 to 19.8k |
| code_tokens | list | |
| docstring | string | length 3 to 17.3k |
| docstring_tokens | list | |
| sha | string | length 40 |
| url | string | length 87 to 242 |

**21,500 · apache/incubator-superset · superset/utils/core.py · `datetime_f`** (python)

```python
def datetime_f(dttm):
    """Formats datetime to take less room when it is recent"""
    if dttm:
        dttm = dttm.isoformat()
        now_iso = datetime.now().isoformat()
        if now_iso[:10] == dttm[:10]:
            dttm = dttm[11:]
        elif now_iso[:4] == dttm[:4]:
            dttm = dttm[5:]
    return '<nobr>{}</nobr>'.format(dttm)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L321-L330
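
A minimal usage sketch for illustration; it assumes `datetime_f` is importable from `superset.utils.core` (the path at this sha) and that `from datetime import datetime` is in scope in that module, as the function requires:

```python
from datetime import datetime, timedelta

from superset.utils.core import datetime_f  # import path as of this sha

# Same calendar day: only the time-of-day survives.
print(datetime_f(datetime.now() - timedelta(minutes=5)))
# e.g. '<nobr>09:15:42.123456</nobr>'

# Same year, different day: the year prefix is dropped.
print(datetime_f(datetime.now() - timedelta(days=40)))
# e.g. '<nobr>03-02T09:15:42.123456</nobr>'

# Falsy input is wrapped as-is.
print(datetime_f(None))  # '<nobr>None</nobr>'
```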

**21,501 · apache/incubator-superset · superset/utils/core.py · `error_msg_from_exception`** (python)

```python
def error_msg_from_exception(e):
    """Translate exception into error message

    Databases have different ways to handle exceptions. This function attempts
    to make sense of the exception object and construct a human readable
    sentence.

    TODO(bkyryliuk): parse the Presto error message from the connection
        created via create_engine.
        engine = create_engine('presto://localhost:3506/silver') -
            gives an e.message as the str(dict)
        presto.connect('localhost', port=3506, catalog='silver') - as a dict.
        The latter version is parsed correctly by this function.
    """
    msg = ''
    if hasattr(e, 'message'):
        if isinstance(e.message, dict):
            msg = e.message.get('message')
        elif e.message:
            msg = '{}'.format(e.message)
    return msg or '{}'.format(e)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L403-L423
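
A sketch of the two branches described above; `DriverError` is a hypothetical stand-in for a DB driver exception that carries a dict `message` attribute:

```python
from superset.utils.core import error_msg_from_exception  # path as of this sha

class DriverError(Exception):
    """Hypothetical stand-in for a DB driver exception with a .message attr."""
    def __init__(self, message):
        super().__init__(str(message))
        self.message = message

# dict-style message (the presto.connect(...) case): the nested key is used.
print(error_msg_from_exception(DriverError({'message': 'Table not found'})))
# -> Table not found

# No usable .message attribute: falls back to str(e).
print(error_msg_from_exception(ValueError('bad input')))
# -> bad input
```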

**21,502 · apache/incubator-superset · superset/utils/core.py · `generic_find_fk_constraint_name`** (python)

```python
def generic_find_fk_constraint_name(table, columns, referenced, insp):
    """Utility to find a foreign-key constraint name in alembic migrations"""
    for fk in insp.get_foreign_keys(table):
        if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
            return fk['name']
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L459-L463
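
This helper, its plural variant, and the unique-constraint finder below share one reflection pattern; a sketch of typical use inside an Alembic migration, with hypothetical table and column names:

```python
from alembic import op
from sqlalchemy.engine.reflection import Inspector

from superset.utils.core import generic_find_fk_constraint_name  # path as of this sha

def upgrade():
    insp = Inspector.from_engine(op.get_bind())
    # Hypothetical schema: drop the unnamed FK from 'slices' to 'datasources(id)'.
    # Note `columns` must be a set: it is compared to set(fk['referred_columns']).
    fk_name = generic_find_fk_constraint_name('slices', {'id'}, 'datasources', insp)
    if fk_name:
        op.drop_constraint(fk_name, 'slices', type_='foreignkey')
```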

**21,503 · apache/incubator-superset · superset/utils/core.py · `generic_find_fk_constraint_names`** (python)

```python
def generic_find_fk_constraint_names(table, columns, referenced, insp):
    """Utility to find foreign-key constraint names in alembic migrations"""
    names = set()
    for fk in insp.get_foreign_keys(table):
        if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
            names.add(fk['name'])
    return names
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L466-L474

**21,504 · apache/incubator-superset · superset/utils/core.py · `generic_find_uq_constraint_name`** (python)

```python
def generic_find_uq_constraint_name(table, columns, insp):
    """Utility to find a unique constraint name in alembic migrations"""
    for uq in insp.get_unique_constraints(table):
        if columns == set(uq['column_names']):
            return uq['name']
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L477-L482
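
The unique-constraint variant follows the same shape; again with hypothetical names:

```python
from alembic import op
from sqlalchemy.engine.reflection import Inspector

from superset.utils.core import generic_find_uq_constraint_name  # path as of this sha

def upgrade():
    insp = Inspector.from_engine(op.get_bind())
    # Hypothetical: find the unique constraint over (datasource_name,) and drop it.
    uq_name = generic_find_uq_constraint_name('datasources', {'datasource_name'}, insp)
    if uq_name:
        op.drop_constraint(uq_name, 'datasources', type_='unique')
```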

**21,505 · apache/incubator-superset · superset/utils/core.py · `setup_cache`** (python)

```python
def setup_cache(app: Flask, cache_config) -> Optional[Cache]:
    """Setup the flask-cache on a flask app"""
    if cache_config and cache_config.get('CACHE_TYPE') != 'null':
        return Cache(app, config=cache_config)
    return None
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L702-L707
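
A configuration sketch; the keys follow Flask-Cache conventions and the Redis URL is a placeholder:

```python
from flask import Flask

from superset.utils.core import setup_cache  # path as of this sha

app = Flask(__name__)

# Any non-'null' CACHE_TYPE yields a Cache instance.
cache = setup_cache(app, {
    'CACHE_TYPE': 'redis',
    'CACHE_DEFAULT_TIMEOUT': 300,
    'CACHE_REDIS_URL': 'redis://localhost:6379/0',  # placeholder
})

# 'null' or an empty config disables caching.
assert setup_cache(app, {'CACHE_TYPE': 'null'}) is None
assert setup_cache(app, None) is None
```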

**21,506 · apache/incubator-superset · superset/utils/core.py · `user_label`** (python)

```python
def user_label(user: User) -> Optional[str]:
    """Given a user ORM FAB object, returns a label"""
    if user:
        if user.first_name and user.last_name:
            return user.first_name + ' ' + user.last_name
        else:
            return user.username
    return None
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L860-L868
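
The branching is easy to pin down with a stand-in object (a real FAB `User` would normally be passed):

```python
from types import SimpleNamespace

from superset.utils.core import user_label  # path as of this sha

user = SimpleNamespace(first_name='Ada', last_name='Lovelace', username='alove')
print(user_label(user))  # 'Ada Lovelace'

partial = SimpleNamespace(first_name='Ada', last_name='', username='alove')
print(user_label(partial))  # 'alove' (both names are required for the full label)

print(user_label(None))  # None
```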

**21,507 · apache/incubator-superset · superset/utils/core.py · `get_since_until`** (python)

```python
def get_since_until(time_range: Optional[str] = None,
                    since: Optional[str] = None,
                    until: Optional[str] = None,
                    time_shift: Optional[str] = None,
                    relative_end: Optional[str] = None) -> Tuple[datetime, datetime]:
    """Return `since` and `until` date time tuple from string representations of
    time_range, since, until and time_shift.

    This function supports both reading the keys separately (from `since` and
    `until`), as well as the new `time_range` key. Valid formats are:

        - ISO 8601
        - X days/years/hours/day/year/weeks
        - X days/years/hours/day/year/weeks ago
        - X days/years/hours/day/year/weeks from now
        - freeform

    Additionally, for `time_range` (these specify both `since` and `until`):

        - Last day
        - Last week
        - Last month
        - Last quarter
        - Last year
        - No filter
        - Last X seconds/minutes/hours/days/weeks/months/years
        - Next X seconds/minutes/hours/days/weeks/months/years

    """
    separator = ' : '
    relative_end = parse_human_datetime(relative_end if relative_end else 'today')
    common_time_frames = {
        'Last day': (relative_end - relativedelta(days=1), relative_end),  # noqa: T400
        'Last week': (relative_end - relativedelta(weeks=1), relative_end),  # noqa: T400
        'Last month': (relative_end - relativedelta(months=1), relative_end),  # noqa: E501, T400
        'Last quarter': (relative_end - relativedelta(months=3), relative_end),  # noqa: E501, T400
        'Last year': (relative_end - relativedelta(years=1), relative_end),  # noqa: T400
    }

    if time_range:
        if separator in time_range:
            since, until = time_range.split(separator, 1)
            if since and since not in common_time_frames:
                since = add_ago_to_since(since)
            since = parse_human_datetime(since)
            until = parse_human_datetime(until)
        elif time_range in common_time_frames:
            since, until = common_time_frames[time_range]
        elif time_range == 'No filter':
            since = until = None
        else:
            rel, num, grain = time_range.split()
            if rel == 'Last':
                since = relative_end - relativedelta(**{grain: int(num)})  # noqa: T400
                until = relative_end
            else:  # rel == 'Next'
                since = relative_end
                until = relative_end + relativedelta(**{grain: int(num)})  # noqa: T400
    else:
        since = since or ''
        if since:
            since = add_ago_to_since(since)
        since = parse_human_datetime(since)
        until = parse_human_datetime(until) if until else relative_end

    if time_shift:
        time_shift = parse_human_timedelta(time_shift)
        since = since if since is None else (since - time_shift)  # noqa: T400
        until = until if until is None else (until - time_shift)  # noqa: T400

    if since and until and since > until:
        raise ValueError(_('From date cannot be larger than to date'))

    return since, until
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L932-L1005
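
A few calls that exercise the documented formats, assuming the same import path:

```python
from superset.utils.core import get_since_until  # path as of this sha

# Common time frames resolve both endpoints against today:
since, until = get_since_until('Last week')

# Explicit ranges use the ' : ' separator; each side is parsed independently:
since, until = get_since_until('2018-01-01 : 2018-06-30')

# 'Last/Next X grain' strings are split into (rel, num, grain):
since, until = get_since_until('Next 5 days')

# Separate keys: a bare '7 days' gets 'ago' appended before parsing:
since, until = get_since_until(since='7 days', until='now')

# 'No filter' disables both bounds:
assert get_since_until('No filter') == (None, None)
```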

**21,508 · apache/incubator-superset · superset/utils/core.py · `split_adhoc_filters_into_base_filters`** (python)

```python
def split_adhoc_filters_into_base_filters(fd):
    """
    Mutates form data to restructure the adhoc filters in the form of the four base
    filters, `where`, `having`, `filters`, and `having_filters` which represent
    free form where sql, free form having sql, structured where clauses and structured
    having clauses.
    """
    adhoc_filters = fd.get('adhoc_filters')
    if isinstance(adhoc_filters, list):
        simple_where_filters = []
        simple_having_filters = []
        sql_where_filters = []
        sql_having_filters = []
        for adhoc_filter in adhoc_filters:
            expression_type = adhoc_filter.get('expressionType')
            clause = adhoc_filter.get('clause')
            if expression_type == 'SIMPLE':
                if clause == 'WHERE':
                    simple_where_filters.append({
                        'col': adhoc_filter.get('subject'),
                        'op': adhoc_filter.get('operator'),
                        'val': adhoc_filter.get('comparator'),
                    })
                elif clause == 'HAVING':
                    simple_having_filters.append({
                        'col': adhoc_filter.get('subject'),
                        'op': adhoc_filter.get('operator'),
                        'val': adhoc_filter.get('comparator'),
                    })
            elif expression_type == 'SQL':
                if clause == 'WHERE':
                    sql_where_filters.append(adhoc_filter.get('sqlExpression'))
                elif clause == 'HAVING':
                    sql_having_filters.append(adhoc_filter.get('sqlExpression'))
        fd['where'] = ' AND '.join(['({})'.format(sql) for sql in sql_where_filters])
        fd['having'] = ' AND '.join(['({})'.format(sql) for sql in sql_having_filters])
        fd['having_filters'] = simple_having_filters
        fd['filters'] = simple_where_filters
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L1043-L1080
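
A before-and-after sketch of the mutation, assuming the same import path:

```python
from superset.utils.core import split_adhoc_filters_into_base_filters  # as of this sha

fd = {
    'adhoc_filters': [
        {'expressionType': 'SIMPLE', 'clause': 'WHERE',
         'subject': 'gender', 'operator': '==', 'comparator': 'girl'},
        {'expressionType': 'SQL', 'clause': 'WHERE', 'sqlExpression': 'num > 100'},
        {'expressionType': 'SQL', 'clause': 'HAVING', 'sqlExpression': 'SUM(num) > 1000'},
    ],
}
split_adhoc_filters_into_base_filters(fd)  # mutates fd in place

assert fd['filters'] == [{'col': 'gender', 'op': '==', 'val': 'girl'}]
assert fd['where'] == '(num > 100)'
assert fd['having'] == '(SUM(num) > 1000)'
assert fd['having_filters'] == []
```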

**21,509 · apache/incubator-superset · superset/data/energy.py · `load_energy`** (python)

```python
def load_energy():
    """Loads an energy related dataset to use with sankey and graphs"""
    tbl_name = 'energy_usage'
    data = get_example_data('energy.json.gz')
    pdf = pd.read_json(data)
    pdf.to_sql(
        tbl_name,
        db.engine,
        if_exists='replace',
        chunksize=500,
        dtype={
            'source': String(255),
            'target': String(255),
            'value': Float(),
        },
        index=False)

    print('Creating table [energy_usage] reference')
    tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
    if not tbl:
        tbl = TBL(table_name=tbl_name)
    tbl.description = 'Energy consumption'
    tbl.database = utils.get_or_create_main_db()

    if not any(col.metric_name == 'sum__value' for col in tbl.metrics):
        tbl.metrics.append(SqlMetric(
            metric_name='sum__value',
            expression='SUM(value)',
        ))

    db.session.merge(tbl)
    db.session.commit()
    tbl.fetch_metadata()

    slc = Slice(
        slice_name='Energy Sankey',
        viz_type='sankey',
        datasource_type='table',
        datasource_id=tbl.id,
        params=textwrap.dedent("""\
        {
            "collapsed_fieldsets": "",
            "groupby": [
                "source",
                "target"
            ],
            "having": "",
            "metric": "sum__value",
            "row_limit": "5000",
            "slice_name": "Energy Sankey",
            "viz_type": "sankey",
            "where": ""
        }
        """),
    )
    misc_dash_slices.add(slc.slice_name)
    merge_slice(slc)

    slc = Slice(
        slice_name='Energy Force Layout',
        viz_type='directed_force',
        datasource_type='table',
        datasource_id=tbl.id,
        params=textwrap.dedent("""\
        {
            "charge": "-500",
            "collapsed_fieldsets": "",
            "groupby": [
                "source",
                "target"
            ],
            "having": "",
            "link_length": "200",
            "metric": "sum__value",
            "row_limit": "5000",
            "slice_name": "Force",
            "viz_type": "directed_force",
            "where": ""
        }
        """),
    )
    misc_dash_slices.add(slc.slice_name)
    merge_slice(slc)

    slc = Slice(
        slice_name='Heatmap',
        viz_type='heatmap',
        datasource_type='table',
        datasource_id=tbl.id,
        params=textwrap.dedent("""\
        {
            "all_columns_x": "source",
            "all_columns_y": "target",
            "canvas_image_rendering": "pixelated",
            "collapsed_fieldsets": "",
            "having": "",
            "linear_color_scheme": "blue_white_yellow",
            "metric": "sum__value",
            "normalize_across": "heatmap",
            "slice_name": "Heatmap",
            "viz_type": "heatmap",
            "where": "",
            "xscale_interval": "1",
            "yscale_interval": "1"
        }
        """),
    )
    misc_dash_slices.add(slc.slice_name)
    merge_slice(slc)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/energy.py#L32-L140

**21,510 · apache/incubator-superset · superset/cli.py · `runserver`** (python)

```python
def runserver(debug, console_log, use_reloader, address, port, timeout, workers, socket):
    """Starts a Superset web server."""
    debug = debug or config.get('DEBUG') or console_log
    if debug:
        print(Fore.BLUE + '-=' * 20)
        print(
            Fore.YELLOW + 'Starting Superset server in ' +
            Fore.RED + 'DEBUG' +
            Fore.YELLOW + ' mode')
        print(Fore.BLUE + '-=' * 20)
        print(Style.RESET_ALL)
        if console_log:
            console_log_run(app, port, use_reloader)
        else:
            debug_run(app, port, use_reloader)
    else:
        logging.info(
            "The Gunicorn 'superset runserver' command is deprecated. Please "
            "use the 'gunicorn' command instead.")
        addr_str = f' unix:{socket} ' if socket else f' {address}:{port} '
        cmd = (
            'gunicorn '
            f'-w {workers} '
            f'--timeout {timeout} '
            f'-b {addr_str} '
            '--limit-request-line 0 '
            '--limit-request-field_size 0 '
            'superset:app'
        )
        print(Fore.GREEN + 'Starting server with command: ')
        print(Fore.YELLOW + cmd)
        print(Style.RESET_ALL)
        Popen(cmd, shell=True).wait()
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L112-L144

**21,511 · apache/incubator-superset · superset/cli.py · `version`** (python)

```python
def version(verbose):
    """Prints the current version number"""
    print(Fore.BLUE + '-=' * 15)
    print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(
        version=config.get('VERSION_STRING')))
    print(Fore.BLUE + '-=' * 15)
    if verbose:
        print('[DB] : ' + '{}'.format(db.engine))
    print(Style.RESET_ALL)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L149-L157

**21,512 · apache/incubator-superset · superset/cli.py · `refresh_druid`** (python)

```python
def refresh_druid(datasource, merge):
    """Refresh druid datasources"""
    session = db.session()
    from superset.connectors.druid.models import DruidCluster
    for cluster in session.query(DruidCluster).all():
        try:
            cluster.refresh_datasources(datasource_name=datasource,
                                        merge_flag=merge)
        except Exception as e:
            print(
                "Error while processing cluster '{}'\n{}".format(
                    cluster, str(e)))
            logging.exception(e)
        cluster.metadata_last_refreshed = datetime.now()
        print(
            'Refreshed metadata from cluster '
            '[' + cluster.cluster_name + ']')
    session.commit()
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L225-L242

**21,513 · apache/incubator-superset · superset/cli.py · `import_dashboards`** (python)

```python
def import_dashboards(path, recursive):
    """Import dashboards from JSON"""
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.json'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.json'))
    for f in files:
        logging.info('Importing dashboard from file %s', f)
        try:
            with f.open() as data_stream:
                dashboard_import_export.import_dashboards(
                    db.session, data_stream)
        except Exception as e:
            logging.error('Error when importing dashboard from file %s', f)
            logging.error(e)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L253-L271
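
The file-discovery branching here (single file, flat glob, recursive rglob) recurs in `import_datasources` below; a standalone sketch of just that logic, with a hypothetical directory name:

```python
from pathlib import Path

def discover(path: str, patterns=('*.json',), recursive: bool = False):
    """Mirrors the discovery branching used by the import commands."""
    p = Path(path)
    if p.is_file():
        return [p]  # an explicit file wins outright
    files = []
    if p.exists():
        for pattern in patterns:
            files.extend(p.rglob(pattern) if recursive else p.glob(pattern))
    return files  # a missing path yields an empty list, as in the original

print(discover('dashboards', recursive=True))  # hypothetical directory
```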

**21,514 · apache/incubator-superset · superset/cli.py · `export_dashboards`** (python)

```python
def export_dashboards(print_stdout, dashboard_file):
    """Export dashboards to JSON"""
    data = dashboard_import_export.export_dashboards(db.session)
    if print_stdout or not dashboard_file:
        print(data)
    if dashboard_file:
        logging.info('Exporting dashboards to %s', dashboard_file)
        with open(dashboard_file, 'w') as data_stream:
            data_stream.write(data)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L281-L289

**21,515 · apache/incubator-superset · superset/cli.py · `import_datasources`** (python)

```python
def import_datasources(path, sync, recursive):
    """Import datasources from YAML"""
    sync_array = sync.split(',')
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.yaml'))
        files.extend(p.glob('*.yml'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.yaml'))
        files.extend(p.rglob('*.yml'))
    for f in files:
        logging.info('Importing datasources from file %s', f)
        try:
            with f.open() as data_stream:
                dict_import_export.import_from_dict(
                    db.session,
                    yaml.safe_load(data_stream),
                    sync=sync_array)
        except Exception as e:
            logging.error('Error when importing datasources from file %s', f)
            logging.error(e)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L305-L328

**21,516 · apache/incubator-superset · superset/cli.py · `export_datasources`** (python)

```python
def export_datasources(print_stdout, datasource_file,
                       back_references, include_defaults):
    """Export datasources to YAML"""
    data = dict_import_export.export_to_dict(
        session=db.session,
        recursive=True,
        back_references=back_references,
        include_defaults=include_defaults)
    if print_stdout or not datasource_file:
        yaml.safe_dump(data, stdout, default_flow_style=False)
    if datasource_file:
        logging.info('Exporting datasources to %s', datasource_file)
        with open(datasource_file, 'w') as data_stream:
            yaml.safe_dump(data, data_stream, default_flow_style=False)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L344-L357

**21,517 · apache/incubator-superset · superset/cli.py · `export_datasource_schema`** (python)

```python
def export_datasource_schema(back_references):
    """Export datasource YAML schema to stdout"""
    data = dict_import_export.export_schema_to_dict(
        back_references=back_references)
    yaml.safe_dump(data, stdout, default_flow_style=False)
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L364-L368

**21,518 · apache/incubator-superset · superset/cli.py · `update_datasources_cache`** (python)

```python
def update_datasources_cache():
    """Refresh sqllab datasources cache"""
    from superset.models.core import Database
    for database in db.session.query(Database).all():
        if database.allow_multi_schema_metadata_fetch:
            print('Fetching {} datasources ...'.format(database.name))
            try:
                database.all_table_names_in_database(
                    force=True, cache=True, cache_timeout=24 * 60 * 60)
                database.all_view_names_in_database(
                    force=True, cache=True, cache_timeout=24 * 60 * 60)
            except Exception as e:
                print('{}'.format(str(e)))
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L372-L384

**21,519 · apache/incubator-superset · superset/cli.py · `worker`** (python)

```python
def worker(workers):
    """Starts a Superset worker for async SQL query execution."""
    logging.info(
        "The 'superset worker' command is deprecated. Please use the 'celery "
        "worker' command instead.")
    if workers:
        celery_app.conf.update(CELERYD_CONCURRENCY=workers)
    elif config.get('SUPERSET_CELERY_WORKERS'):
        celery_app.conf.update(
            CELERYD_CONCURRENCY=config.get('SUPERSET_CELERY_WORKERS'))

    worker = celery_app.Worker(optimization='fair')
    worker.start()
```

sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L392-L404

**21,520 · apache/incubator-superset · superset/cli.py · `flower`** (python)

```python
def flower(port, address):
    """Runs a Celery Flower web server

    Celery Flower is a UI to monitor the Celery operation on a given
    broker"""
    BROKER_URL = celery_app.conf.BROKER_URL
    cmd = (
        'celery flower '
        f'--broker={BROKER_URL} '
        f'--port={port} '
        f'--address={address} '
    )
    logging.info(
        "The 'superset flower' command is deprecated. Please use the 'celery "
        "flower' command instead.")
    print(Fore.GREEN + 'Starting a Celery Flower instance')
    print(Fore.BLUE + '-=' * 40)
    print(Fore.YELLOW + cmd)
    print(Fore.BLUE + '-=' * 40)
    Popen(cmd, shell=True).wait()
```
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/cli.py#L416-L435
|
21,521
|
apache/incubator-superset
|
superset/connectors/druid/views.py
|
Druid.refresh_datasources
|
def refresh_datasources(self, refreshAll=True):
"""endpoint that refreshes druid datasources metadata"""
session = db.session()
DruidCluster = ConnectorRegistry.sources['druid'].cluster_class
for cluster in session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refreshAll=refreshAll)
except Exception as e:
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)),
'danger')
logging.exception(e)
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_('Refreshed metadata from cluster [{}]').format(
cluster.cluster_name),
'info')
session.commit()
return redirect('/druiddatasourcemodelview/list/')
|
python
|
def refresh_datasources(self, refreshAll=True):
"""endpoint that refreshes druid datasources metadata"""
session = db.session()
DruidCluster = ConnectorRegistry.sources['druid'].cluster_class
for cluster in session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refreshAll=refreshAll)
except Exception as e:
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)),
'danger')
logging.exception(e)
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_('Refreshed metadata from cluster [{}]').format(
cluster.cluster_name),
'info')
session.commit()
return redirect('/druiddatasourcemodelview/list/')
|
[
"def",
"refresh_datasources",
"(",
"self",
",",
"refreshAll",
"=",
"True",
")",
":",
"session",
"=",
"db",
".",
"session",
"(",
")",
"DruidCluster",
"=",
"ConnectorRegistry",
".",
"sources",
"[",
"'druid'",
"]",
".",
"cluster_class",
"for",
"cluster",
"in",
"session",
".",
"query",
"(",
"DruidCluster",
")",
".",
"all",
"(",
")",
":",
"cluster_name",
"=",
"cluster",
".",
"cluster_name",
"valid_cluster",
"=",
"True",
"try",
":",
"cluster",
".",
"refresh_datasources",
"(",
"refreshAll",
"=",
"refreshAll",
")",
"except",
"Exception",
"as",
"e",
":",
"valid_cluster",
"=",
"False",
"flash",
"(",
"\"Error while processing cluster '{}'\\n{}\"",
".",
"format",
"(",
"cluster_name",
",",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
")",
",",
"'danger'",
")",
"logging",
".",
"exception",
"(",
"e",
")",
"pass",
"if",
"valid_cluster",
":",
"cluster",
".",
"metadata_last_refreshed",
"=",
"datetime",
".",
"now",
"(",
")",
"flash",
"(",
"_",
"(",
"'Refreshed metadata from cluster [{}]'",
")",
".",
"format",
"(",
"cluster",
".",
"cluster_name",
")",
",",
"'info'",
")",
"session",
".",
"commit",
"(",
")",
"return",
"redirect",
"(",
"'/druiddatasourcemodelview/list/'",
")"
] |
endpoint that refreshes druid datasources metadata
|
[
"endpoint",
"that",
"refreshes",
"druid",
"datasources",
"metadata"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/views.py#L339-L363
|
21,522
|
keon/algorithms
|
algorithms/linkedlist/add_two_numbers.py
|
convert_to_str
|
def convert_to_str(l: Node) -> str:
"""
converts the non-negative number list into a string.
"""
result = ""
while l:
result += str(l.val)
l = l.next
return result
|
python
|
def convert_to_str(l: Node) -> str:
"""
converts the non-negative number list into a string.
"""
result = ""
while l:
result += str(l.val)
l = l.next
return result
|
[
"def",
"convert_to_str",
"(",
"l",
":",
"Node",
")",
"->",
"str",
":",
"result",
"=",
"\"\"",
"while",
"l",
":",
"result",
"+=",
"str",
"(",
"l",
".",
"val",
")",
"l",
"=",
"l",
".",
"next",
"return",
"result"
] |
converts the non-negative number list into a string.
|
[
"converts",
"the",
"non",
"-",
"negative",
"number",
"list",
"into",
"a",
"string",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/linkedlist/add_two_numbers.py#L66-L74
|
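A quick usage sketch for `convert_to_str` above. The repo's `Node` class is not part of this record, so the minimal definition below is an assumption based on the `val`/`next` attributes the function reads:

class Node:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

# the list 2 -> 4 -> 3 stringifies digit by digit, head first
head = Node(2, Node(4, Node(3)))
assert convert_to_str(head) == "243"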
21,523
|
keon/algorithms
|
algorithms/sort/cocktail_shaker_sort.py
|
cocktail_shaker_sort
|
def cocktail_shaker_sort(arr):
"""
Cocktail_shaker_sort
Sorting a given array
    a variation of bubble sort
reference: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
Worst-case performance: O(N^2)
"""
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
while swapped:
swapped = False
for i in range(1, n):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
        if not swapped:
            return arr
        swapped = False
        for i in range(n - 1, 0, -1):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
return arr
|
python
|
def cocktail_shaker_sort(arr):
"""
Cocktail_shaker_sort
Sorting a given array
    a variation of bubble sort
reference: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
Worst-case performance: O(N^2)
"""
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
while swapped:
swapped = False
for i in range(1, n):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
        if not swapped:
            return arr
        swapped = False
        for i in range(n - 1, 0, -1):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
return arr
|
[
"def",
"cocktail_shaker_sort",
"(",
"arr",
")",
":",
"def",
"swap",
"(",
"i",
",",
"j",
")",
":",
"arr",
"[",
"i",
"]",
",",
"arr",
"[",
"j",
"]",
"=",
"arr",
"[",
"j",
"]",
",",
"arr",
"[",
"i",
"]",
"n",
"=",
"len",
"(",
"arr",
")",
"swapped",
"=",
"True",
"while",
"swapped",
":",
"swapped",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"if",
"arr",
"[",
"i",
"-",
"1",
"]",
">",
"arr",
"[",
"i",
"]",
":",
"swap",
"(",
"i",
"-",
"1",
",",
"i",
")",
"swapped",
"=",
"True",
"if",
"swapped",
"==",
"False",
":",
"return",
"arr",
"swapped",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"if",
"arr",
"[",
"i",
"-",
"1",
"]",
">",
"arr",
"[",
"i",
"]",
":",
"swap",
"(",
"i",
"-",
"1",
",",
"i",
")",
"swapped",
"=",
"True",
"return",
"arr"
] |
Cocktail_shaker_sort
Sorting a given array
a variation of bubble sort
reference: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
Worst-case performance: O(N^2)
|
[
"Cocktail_shaker_sort",
"Sorting",
"a",
"given",
"array",
"mutation",
"of",
"bubble",
"sort"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/cocktail_shaker_sort.py#L1-L30
|
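A minimal usage sketch for `cocktail_shaker_sort` above; it sorts in place and also returns the list:

data = [5, 1, 4, 2, 8]
assert cocktail_shaker_sort(data) == [1, 2, 4, 5, 8]
assert data == [1, 2, 4, 5, 8]  # sorted in place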
21,524
|
keon/algorithms
|
algorithms/strings/longest_common_prefix.py
|
common_prefix
|
def common_prefix(s1, s2):
"Return prefix common of 2 strings"
if not s1 or not s2:
return ""
k = 0
while s1[k] == s2[k]:
k = k + 1
if k >= len(s1) or k >= len(s2):
return s1[0:k]
return s1[0:k]
|
python
|
def common_prefix(s1, s2):
"Return prefix common of 2 strings"
if not s1 or not s2:
return ""
k = 0
while s1[k] == s2[k]:
k = k + 1
if k >= len(s1) or k >= len(s2):
return s1[0:k]
return s1[0:k]
|
[
"def",
"common_prefix",
"(",
"s1",
",",
"s2",
")",
":",
"if",
"not",
"s1",
"or",
"not",
"s2",
":",
"return",
"\"\"",
"k",
"=",
"0",
"while",
"s1",
"[",
"k",
"]",
"==",
"s2",
"[",
"k",
"]",
":",
"k",
"=",
"k",
"+",
"1",
"if",
"k",
">=",
"len",
"(",
"s1",
")",
"or",
"k",
">=",
"len",
"(",
"s2",
")",
":",
"return",
"s1",
"[",
"0",
":",
"k",
"]",
"return",
"s1",
"[",
"0",
":",
"k",
"]"
] |
Return the common prefix of 2 strings
|
[
"Return",
"prefix",
"common",
"of",
"2",
"strings"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/strings/longest_common_prefix.py#L21-L30
|
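A usage sketch for `common_prefix` above; folding it over a list with `functools.reduce` yields the longest common prefix of many strings (this driver is an assumption, the repo's own wrapper is not shown in this record):

from functools import reduce

assert common_prefix("flower", "flow") == "flow"
# pairwise reduction: ("flower", "flow") -> "flow", then ("flow", "flight") -> "fl"
assert reduce(common_prefix, ["flower", "flow", "flight"]) == "fl"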
21,525
|
keon/algorithms
|
algorithms/strings/min_distance.py
|
lcs
|
def lcs(s1, s2, i, j):
"""
    The length of the longest common subsequence among the two given strings s1 and s2
"""
if i == 0 or j == 0:
return 0
elif s1[i - 1] == s2[j - 1]:
return 1 + lcs(s1, s2, i - 1, j - 1)
else:
return max(lcs(s1, s2, i - 1, j), lcs(s1, s2, i, j - 1))
|
python
|
def lcs(s1, s2, i, j):
"""
    The length of the longest common subsequence among the two given strings s1 and s2
"""
if i == 0 or j == 0:
return 0
elif s1[i - 1] == s2[j - 1]:
return 1 + lcs(s1, s2, i - 1, j - 1)
else:
return max(lcs(s1, s2, i - 1, j), lcs(s1, s2, i, j - 1))
|
[
"def",
"lcs",
"(",
"s1",
",",
"s2",
",",
"i",
",",
"j",
")",
":",
"if",
"i",
"==",
"0",
"or",
"j",
"==",
"0",
":",
"return",
"0",
"elif",
"s1",
"[",
"i",
"-",
"1",
"]",
"==",
"s2",
"[",
"j",
"-",
"1",
"]",
":",
"return",
"1",
"+",
"lcs",
"(",
"s1",
",",
"s2",
",",
"i",
"-",
"1",
",",
"j",
"-",
"1",
")",
"else",
":",
"return",
"max",
"(",
"lcs",
"(",
"s1",
",",
"s2",
",",
"i",
"-",
"1",
",",
"j",
")",
",",
"lcs",
"(",
"s1",
",",
"s2",
",",
"i",
",",
"j",
"-",
"1",
")",
")"
] |
The length of the longest common subsequence among the two given strings s1 and s2
|
[
"The",
"length",
"of",
"longest",
"common",
"subsequence",
"among",
"the",
"two",
"given",
"strings",
"s1",
"and",
"s2"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/strings/min_distance.py#L17-L26
|
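A small sketch for `lcs` above. The file name suggests it backs a delete-only minimum-edit-distance computation, where the standard relation is min_distance = len(s1) + len(s2) - 2 * lcs_length (the repo's wrapper itself is not shown here):

s1, s2 = "sea", "eat"
length = lcs(s1, s2, len(s1), len(s2))
assert length == 2                          # the common subsequence is "ea"
assert len(s1) + len(s2) - 2 * length == 2  # two deletions reduce both strings to "ea"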
21,526
|
keon/algorithms
|
algorithms/maths/nth_digit.py
|
find_nth_digit
|
def find_nth_digit(n):
"""find the nth digit of given number.
1. find the length of the number where the nth digit is from.
2. find the actual number where the nth digit is from
3. find the nth digit and return
"""
length = 1
count = 9
start = 1
while n > length * count:
n -= length * count
length += 1
count *= 10
start *= 10
    start += (n - 1) // length
s = str(start)
return int(s[(n-1) % length])
|
python
|
def find_nth_digit(n):
"""find the nth digit of given number.
1. find the length of the number where the nth digit is from.
2. find the actual number where the nth digit is from
3. find the nth digit and return
"""
length = 1
count = 9
start = 1
while n > length * count:
n -= length * count
length += 1
count *= 10
start *= 10
    start += (n - 1) // length
s = str(start)
return int(s[(n-1) % length])
|
[
"def",
"find_nth_digit",
"(",
"n",
")",
":",
"length",
"=",
"1",
"count",
"=",
"9",
"start",
"=",
"1",
"while",
"n",
">",
"length",
"*",
"count",
":",
"n",
"-=",
"length",
"*",
"count",
"length",
"+=",
"1",
"count",
"*=",
"10",
"start",
"*=",
"10",
"start",
"+=",
"(",
"n",
"-",
"1",
")",
"/",
"length",
"s",
"=",
"str",
"(",
"start",
")",
"return",
"int",
"(",
"s",
"[",
"(",
"n",
"-",
"1",
")",
"%",
"length",
"]",
")"
] |
find the nth digit of a given number.
1. find the length of the number where the nth digit is from.
2. find the actual number where the nth digit is from
3. find the nth digit and return
|
[
"find",
"the",
"nth",
"digit",
"of",
"given",
"number",
".",
"1",
".",
"find",
"the",
"length",
"of",
"the",
"number",
"where",
"the",
"nth",
"digit",
"is",
"from",
".",
"2",
".",
"find",
"the",
"actual",
"number",
"where",
"the",
"nth",
"digit",
"is",
"from",
"3",
".",
"find",
"the",
"nth",
"digit",
"and",
"return"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/nth_digit.py#L1-L17
|
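A usage sketch for `find_nth_digit` above, indexing (1-based) into the digit stream of 1, 2, 3, ..., 9, 10, 11, ... (note the integer division fixed above: a float `start` would corrupt the string lookup):

# digit stream: 1 2 3 4 5 6 7 8 9 | 1 0 | 1 1 | 1 2 ...
assert find_nth_digit(3) == 3    # still inside the one-digit numbers
assert find_nth_digit(11) == 0   # second digit of 10
assert find_nth_digit(13) == 1   # second digit of 11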
21,527
|
keon/algorithms
|
algorithms/maths/prime_check.py
|
prime_check
|
def prime_check(n):
"""Return True if n is a prime number
Else return False.
"""
if n <= 1:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
j = 5
while j * j <= n:
if n % j == 0 or n % (j + 2) == 0:
return False
j += 6
return True
|
python
|
def prime_check(n):
"""Return True if n is a prime number
Else return False.
"""
if n <= 1:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
j = 5
while j * j <= n:
if n % j == 0 or n % (j + 2) == 0:
return False
j += 6
return True
|
[
"def",
"prime_check",
"(",
"n",
")",
":",
"if",
"n",
"<=",
"1",
":",
"return",
"False",
"if",
"n",
"==",
"2",
"or",
"n",
"==",
"3",
":",
"return",
"True",
"if",
"n",
"%",
"2",
"==",
"0",
"or",
"n",
"%",
"3",
"==",
"0",
":",
"return",
"False",
"j",
"=",
"5",
"while",
"j",
"*",
"j",
"<=",
"n",
":",
"if",
"n",
"%",
"j",
"==",
"0",
"or",
"n",
"%",
"(",
"j",
"+",
"2",
")",
"==",
"0",
":",
"return",
"False",
"j",
"+=",
"6",
"return",
"True"
] |
Return True if n is a prime number
Else return False.
|
[
"Return",
"True",
"if",
"n",
"is",
"a",
"prime",
"number",
"Else",
"return",
"False",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/prime_check.py#L1-L17
|
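A usage sketch for `prime_check` above; after ruling out multiples of 2 and 3, it only trial-divides by numbers of the form 6k ± 1 (`j` and `j + 2`), which is why the loop steps by 6:

assert prime_check(97) is True
assert prime_check(91) is False   # 91 = 7 * 13, caught at j = 5 via j + 2 = 7
assert prime_check(1) is False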
21,528
|
keon/algorithms
|
algorithms/arrays/longest_non_repeat.py
|
longest_non_repeat_v1
|
def longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
"""
if string is None:
return 0
dict = {}
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in dict:
j = max(dict[string[i]], j)
dict[string[i]] = i + 1
max_length = max(max_length, i - j + 1)
return max_length
|
python
|
def longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
"""
if string is None:
return 0
dict = {}
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in dict:
j = max(dict[string[i]], j)
dict[string[i]] = i + 1
max_length = max(max_length, i - j + 1)
return max_length
|
[
"def",
"longest_non_repeat_v1",
"(",
"string",
")",
":",
"if",
"string",
"is",
"None",
":",
"return",
"0",
"dict",
"=",
"{",
"}",
"max_length",
"=",
"0",
"j",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"if",
"string",
"[",
"i",
"]",
"in",
"dict",
":",
"j",
"=",
"max",
"(",
"dict",
"[",
"string",
"[",
"i",
"]",
"]",
",",
"j",
")",
"dict",
"[",
"string",
"[",
"i",
"]",
"]",
"=",
"i",
"+",
"1",
"max_length",
"=",
"max",
"(",
"max_length",
",",
"i",
"-",
"j",
"+",
"1",
")",
"return",
"max_length"
] |
Find the length of the longest substring
without repeating characters.
|
[
"Find",
"the",
"length",
"of",
"the",
"longest",
"substring",
"without",
"repeating",
"characters",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/longest_non_repeat.py#L14-L29
|
21,529
|
keon/algorithms
|
algorithms/arrays/longest_non_repeat.py
|
longest_non_repeat_v2
|
def longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
"""
if string is None:
return 0
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
max_len = max(max_len, index - start + 1)
used_char[char] = index
return max_len
|
python
|
def longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
"""
if string is None:
return 0
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
max_len = max(max_len, index - start + 1)
used_char[char] = index
return max_len
|
[
"def",
"longest_non_repeat_v2",
"(",
"string",
")",
":",
"if",
"string",
"is",
"None",
":",
"return",
"0",
"start",
",",
"max_len",
"=",
"0",
",",
"0",
"used_char",
"=",
"{",
"}",
"for",
"index",
",",
"char",
"in",
"enumerate",
"(",
"string",
")",
":",
"if",
"char",
"in",
"used_char",
"and",
"start",
"<=",
"used_char",
"[",
"char",
"]",
":",
"start",
"=",
"used_char",
"[",
"char",
"]",
"+",
"1",
"else",
":",
"max_len",
"=",
"max",
"(",
"max_len",
",",
"index",
"-",
"start",
"+",
"1",
")",
"used_char",
"[",
"char",
"]",
"=",
"index",
"return",
"max_len"
] |
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
|
[
"Find",
"the",
"length",
"of",
"the",
"longest",
"substring",
"without",
"repeating",
"characters",
".",
"Uses",
"alternative",
"algorithm",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/longest_non_repeat.py#L31-L47
|
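A usage sketch for the sliding-window version above; `start` jumps past the previous occurrence of a repeated character instead of rescanning:

assert longest_non_repeat_v2("abcabcbb") == 3   # "abc"
assert longest_non_repeat_v2("bbbbb") == 1      # "b"
assert longest_non_repeat_v2("") == 0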
21,530
|
keon/algorithms
|
algorithms/arrays/longest_non_repeat.py
|
get_longest_non_repeat_v1
|
def get_longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
dict = {}
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in dict:
j = max(dict[string[i]], j)
dict[string[i]] = i + 1
if i - j + 1 > max_length:
max_length = i - j + 1
sub_string = string[j: i + 1]
return max_length, sub_string
|
python
|
def get_longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
dict = {}
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in dict:
j = max(dict[string[i]], j)
dict[string[i]] = i + 1
if i - j + 1 > max_length:
max_length = i - j + 1
sub_string = string[j: i + 1]
return max_length, sub_string
|
[
"def",
"get_longest_non_repeat_v1",
"(",
"string",
")",
":",
"if",
"string",
"is",
"None",
":",
"return",
"0",
",",
"''",
"sub_string",
"=",
"''",
"dict",
"=",
"{",
"}",
"max_length",
"=",
"0",
"j",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"if",
"string",
"[",
"i",
"]",
"in",
"dict",
":",
"j",
"=",
"max",
"(",
"dict",
"[",
"string",
"[",
"i",
"]",
"]",
",",
"j",
")",
"dict",
"[",
"string",
"[",
"i",
"]",
"]",
"=",
"i",
"+",
"1",
"if",
"i",
"-",
"j",
"+",
"1",
">",
"max_length",
":",
"max_length",
"=",
"i",
"-",
"j",
"+",
"1",
"sub_string",
"=",
"string",
"[",
"j",
":",
"i",
"+",
"1",
"]",
"return",
"max_length",
",",
"sub_string"
] |
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
|
[
"Find",
"the",
"length",
"of",
"the",
"longest",
"substring",
"without",
"repeating",
"characters",
".",
"Return",
"max_len",
"and",
"the",
"substring",
"as",
"a",
"tuple"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/longest_non_repeat.py#L50-L69
|
21,531
|
keon/algorithms
|
algorithms/arrays/longest_non_repeat.py
|
get_longest_non_repeat_v2
|
def get_longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
if index - start + 1 > max_len:
max_len = index - start + 1
sub_string = string[start: index + 1]
used_char[char] = index
return max_len, sub_string
|
python
|
def get_longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
Return max_len and the substring as a tuple
"""
if string is None:
return 0, ''
sub_string = ''
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
if index - start + 1 > max_len:
max_len = index - start + 1
sub_string = string[start: index + 1]
used_char[char] = index
return max_len, sub_string
|
[
"def",
"get_longest_non_repeat_v2",
"(",
"string",
")",
":",
"if",
"string",
"is",
"None",
":",
"return",
"0",
",",
"''",
"sub_string",
"=",
"''",
"start",
",",
"max_len",
"=",
"0",
",",
"0",
"used_char",
"=",
"{",
"}",
"for",
"index",
",",
"char",
"in",
"enumerate",
"(",
"string",
")",
":",
"if",
"char",
"in",
"used_char",
"and",
"start",
"<=",
"used_char",
"[",
"char",
"]",
":",
"start",
"=",
"used_char",
"[",
"char",
"]",
"+",
"1",
"else",
":",
"if",
"index",
"-",
"start",
"+",
"1",
">",
"max_len",
":",
"max_len",
"=",
"index",
"-",
"start",
"+",
"1",
"sub_string",
"=",
"string",
"[",
"start",
":",
"index",
"+",
"1",
"]",
"used_char",
"[",
"char",
"]",
"=",
"index",
"return",
"max_len",
",",
"sub_string"
] |
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
Return max_len and the substring as a tuple
|
[
"Find",
"the",
"length",
"of",
"the",
"longest",
"substring",
"without",
"repeating",
"characters",
".",
"Uses",
"alternative",
"algorithm",
".",
"Return",
"max_len",
"and",
"the",
"substring",
"as",
"a",
"tuple"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/longest_non_repeat.py#L71-L91
|
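The tuple-returning variant above can be exercised the same way; it also reports which window achieved the maximum:

assert get_longest_non_repeat_v2("pwwkew") == (3, "wke")
assert get_longest_non_repeat_v2(None) == (0, '')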
21,532
|
keon/algorithms
|
algorithms/queues/priority_queue.py
|
PriorityQueue.push
|
def push(self, item, priority=None):
"""Push the item in the priority queue.
    If priority is not given, priority is set to the value of item.
"""
priority = item if priority is None else priority
node = PriorityQueueNode(item, priority)
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
self.priority_queue_list.insert(index, node)
return
# when traversed complete queue
self.priority_queue_list.append(node)
|
python
|
def push(self, item, priority=None):
"""Push the item in the priority queue.
    If priority is not given, priority is set to the value of item.
"""
priority = item if priority is None else priority
node = PriorityQueueNode(item, priority)
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
self.priority_queue_list.insert(index, node)
return
# when traversed complete queue
self.priority_queue_list.append(node)
|
[
"def",
"push",
"(",
"self",
",",
"item",
",",
"priority",
"=",
"None",
")",
":",
"priority",
"=",
"item",
"if",
"priority",
"is",
"None",
"else",
"priority",
"node",
"=",
"PriorityQueueNode",
"(",
"item",
",",
"priority",
")",
"for",
"index",
",",
"current",
"in",
"enumerate",
"(",
"self",
".",
"priority_queue_list",
")",
":",
"if",
"current",
".",
"priority",
"<",
"node",
".",
"priority",
":",
"self",
".",
"priority_queue_list",
".",
"insert",
"(",
"index",
",",
"node",
")",
"return",
"# when traversed complete queue",
"self",
".",
"priority_queue_list",
".",
"append",
"(",
"node",
")"
] |
Push the item in the priority queue.
If priority is not given, priority is set to the value of item.
|
[
"Push",
"the",
"item",
"in",
"the",
"priority",
"queue",
".",
"if",
"priority",
"is",
"not",
"given",
"priority",
"is",
"set",
"to",
"the",
"value",
"of",
"item",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/queues/priority_queue.py#L38-L49
|
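A self-contained sketch of the insertion logic above. The surrounding `PriorityQueue` and `PriorityQueueNode` classes are not in this record, so the minimal versions below are assumptions; the point is that `push` keeps the backing list sorted by descending priority:

class PriorityQueueNode:
    def __init__(self, data, priority):
        self.data = data
        self.priority = priority

class PriorityQueue:
    def __init__(self):
        self.priority_queue_list = []

    # same body as the record's push, reproduced so the sketch runs standalone
    def push(self, item, priority=None):
        priority = item if priority is None else priority
        node = PriorityQueueNode(item, priority)
        for index, current in enumerate(self.priority_queue_list):
            if current.priority < node.priority:
                self.priority_queue_list.insert(index, node)
                return
        self.priority_queue_list.append(node)

q = PriorityQueue()
for value in [3, 1, 2]:
    q.push(value)
assert [node.priority for node in q.priority_queue_list] == [3, 2, 1]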
21,533
|
keon/algorithms
|
algorithms/arrays/flatten.py
|
flatten_iter
|
def flatten_iter(iterable):
"""
Takes as input multi dimensional iterable and
returns generator which produces one dimensional output.
"""
for element in iterable:
if isinstance(element, Iterable):
yield from flatten_iter(element)
else:
yield element
|
python
|
def flatten_iter(iterable):
"""
Takes as input multi dimensional iterable and
returns generator which produces one dimensional output.
"""
for element in iterable:
if isinstance(element, Iterable):
yield from flatten_iter(element)
else:
yield element
|
[
"def",
"flatten_iter",
"(",
"iterable",
")",
":",
"for",
"element",
"in",
"iterable",
":",
"if",
"isinstance",
"(",
"element",
",",
"Iterable",
")",
":",
"yield",
"from",
"flatten_iter",
"(",
"element",
")",
"else",
":",
"yield",
"element"
] |
Takes as input multi dimensional iterable and
returns generator which produces one dimensional output.
|
[
"Takes",
"as",
"input",
"multi",
"dimensional",
"iterable",
"and",
"returns",
"generator",
"which",
"produces",
"one",
"dimensional",
"output",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/flatten.py#L22-L31
|
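A usage sketch for `flatten_iter` above. It depends on `Iterable`, which the repo imports elsewhere; `collections.abc` is the standard source. One caveat worth noting: strings are themselves iterable, so feeding it nested strings would recurse without end:

from collections.abc import Iterable  # needed by flatten_iter's isinstance check

assert list(flatten_iter([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]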
21,534
|
keon/algorithms
|
algorithms/iterables/convolved.py
|
convolved
|
def convolved(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""Iterable to get every convolution window per loop iteration.
For example:
`convolved([1, 2, 3, 4], kernel_size=2)`
will produce the following result:
`[[1, 2], [2, 3], [3, 4]]`.
`convolved([1, 2, 3], kernel_size=2, stride=1, padding=2, default_value=42)`
will produce the following result:
`[[42, 42], [42, 1], [1, 2], [2, 3], [3, 42], [42, 42]]`
Arguments:
iterable: An object to iterate on. It should support slice indexing if `padding == 0`.
kernel_size: The number of items yielded at every iteration.
stride: The step size between each iteration.
padding: Padding must be an integer or a string with value `SAME` or `VALID`. If it is an integer, it represents
how many values we add with `default_value` on the borders. If it is a string, `SAME` means that the
convolution will add some padding according to the kernel_size, and `VALID` is the same as
specifying `padding=0`.
default_value: Default fill value for padding and values outside iteration range.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
# Input validation and error messages
if not hasattr(iterable, '__iter__'):
raise ValueError(
"Can't iterate on object.".format(
iterable))
if stride < 1:
raise ValueError(
"Stride must be of at least one. Got `stride={}`.".format(
stride))
if not (padding in ['SAME', 'VALID'] or type(padding) in [int]):
raise ValueError(
"Padding must be an integer or a string with value `SAME` or `VALID`.")
if not isinstance(padding, str):
if padding < 0:
raise ValueError(
"Padding must be of at least zero. Got `padding={}`.".format(
padding))
else:
if padding == 'SAME':
padding = kernel_size // 2
elif padding == 'VALID':
padding = 0
if not type(iterable) == list:
iterable = list(iterable)
# Add padding to iterable
if padding > 0:
pad = [default_value] * padding
iterable = pad + list(iterable) + pad
# Fill missing value to the right
remainder = (kernel_size - len(iterable)) % stride
extra_pad = [default_value] * remainder
iterable = iterable + extra_pad
i = 0
while True:
if i > len(iterable) - kernel_size:
break
yield iterable[i:i + kernel_size]
i += stride
|
python
|
def convolved(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""Iterable to get every convolution window per loop iteration.
For example:
`convolved([1, 2, 3, 4], kernel_size=2)`
will produce the following result:
`[[1, 2], [2, 3], [3, 4]]`.
`convolved([1, 2, 3], kernel_size=2, stride=1, padding=2, default_value=42)`
will produce the following result:
`[[42, 42], [42, 1], [1, 2], [2, 3], [3, 42], [42, 42]]`
Arguments:
iterable: An object to iterate on. It should support slice indexing if `padding == 0`.
kernel_size: The number of items yielded at every iteration.
stride: The step size between each iteration.
padding: Padding must be an integer or a string with value `SAME` or `VALID`. If it is an integer, it represents
how many values we add with `default_value` on the borders. If it is a string, `SAME` means that the
convolution will add some padding according to the kernel_size, and `VALID` is the same as
specifying `padding=0`.
default_value: Default fill value for padding and values outside iteration range.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
# Input validation and error messages
if not hasattr(iterable, '__iter__'):
raise ValueError(
"Can't iterate on object.".format(
iterable))
if stride < 1:
raise ValueError(
"Stride must be of at least one. Got `stride={}`.".format(
stride))
if not (padding in ['SAME', 'VALID'] or type(padding) in [int]):
raise ValueError(
"Padding must be an integer or a string with value `SAME` or `VALID`.")
if not isinstance(padding, str):
if padding < 0:
raise ValueError(
"Padding must be of at least zero. Got `padding={}`.".format(
padding))
else:
if padding == 'SAME':
padding = kernel_size // 2
elif padding == 'VALID':
padding = 0
if not type(iterable) == list:
iterable = list(iterable)
# Add padding to iterable
if padding > 0:
pad = [default_value] * padding
iterable = pad + list(iterable) + pad
# Fill missing value to the right
remainder = (kernel_size - len(iterable)) % stride
extra_pad = [default_value] * remainder
iterable = iterable + extra_pad
i = 0
while True:
if i > len(iterable) - kernel_size:
break
yield iterable[i:i + kernel_size]
i += stride
|
[
"def",
"convolved",
"(",
"iterable",
",",
"kernel_size",
"=",
"1",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"0",
",",
"default_value",
"=",
"None",
")",
":",
"# Input validation and error messages",
"if",
"not",
"hasattr",
"(",
"iterable",
",",
"'__iter__'",
")",
":",
"raise",
"ValueError",
"(",
"\"Can't iterate on object.\"",
".",
"format",
"(",
"iterable",
")",
")",
"if",
"stride",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Stride must be of at least one. Got `stride={}`.\"",
".",
"format",
"(",
"stride",
")",
")",
"if",
"not",
"(",
"padding",
"in",
"[",
"'SAME'",
",",
"'VALID'",
"]",
"or",
"type",
"(",
"padding",
")",
"in",
"[",
"int",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Padding must be an integer or a string with value `SAME` or `VALID`.\"",
")",
"if",
"not",
"isinstance",
"(",
"padding",
",",
"str",
")",
":",
"if",
"padding",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Padding must be of at least zero. Got `padding={}`.\"",
".",
"format",
"(",
"padding",
")",
")",
"else",
":",
"if",
"padding",
"==",
"'SAME'",
":",
"padding",
"=",
"kernel_size",
"//",
"2",
"elif",
"padding",
"==",
"'VALID'",
":",
"padding",
"=",
"0",
"if",
"not",
"type",
"(",
"iterable",
")",
"==",
"list",
":",
"iterable",
"=",
"list",
"(",
"iterable",
")",
"# Add padding to iterable",
"if",
"padding",
">",
"0",
":",
"pad",
"=",
"[",
"default_value",
"]",
"*",
"padding",
"iterable",
"=",
"pad",
"+",
"list",
"(",
"iterable",
")",
"+",
"pad",
"# Fill missing value to the right",
"remainder",
"=",
"(",
"kernel_size",
"-",
"len",
"(",
"iterable",
")",
")",
"%",
"stride",
"extra_pad",
"=",
"[",
"default_value",
"]",
"*",
"remainder",
"iterable",
"=",
"iterable",
"+",
"extra_pad",
"i",
"=",
"0",
"while",
"True",
":",
"if",
"i",
">",
"len",
"(",
"iterable",
")",
"-",
"kernel_size",
":",
"break",
"yield",
"iterable",
"[",
"i",
":",
"i",
"+",
"kernel_size",
"]",
"i",
"+=",
"stride"
] |
Iterable to get every convolution window per loop iteration.
For example:
`convolved([1, 2, 3, 4], kernel_size=2)`
will produce the following result:
`[[1, 2], [2, 3], [3, 4]]`.
`convolved([1, 2, 3], kernel_size=2, stride=1, padding=2, default_value=42)`
will produce the following result:
`[[42, 42], [42, 1], [1, 2], [2, 3], [3, 42], [42, 42]]`
Arguments:
iterable: An object to iterate on. It should support slice indexing if `padding == 0`.
kernel_size: The number of items yielded at every iteration.
stride: The step size between each iteration.
padding: Padding must be an integer or a string with value `SAME` or `VALID`. If it is an integer, it represents
how many values we add with `default_value` on the borders. If it is a string, `SAME` means that the
convolution will add some padding according to the kernel_size, and `VALID` is the same as
specifying `padding=0`.
default_value: Default fill value for padding and values outside iteration range.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
|
[
"Iterable",
"to",
"get",
"every",
"convolution",
"window",
"per",
"loop",
"iteration",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/iterables/convolved.py#L28-L94
|
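A few round trips through `convolved` above, matching the examples in its docstring:

assert list(convolved([1, 2, 3, 4], kernel_size=2)) == [[1, 2], [2, 3], [3, 4]]
assert list(convolved([1, 2, 3], kernel_size=2, padding=2, default_value=42)) == \
    [[42, 42], [42, 1], [1, 2], [2, 3], [3, 42], [42, 42]]
assert list(convolved([1, 2, 3, 4], kernel_size=2, stride=2)) == [[1, 2], [3, 4]]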
21,535
|
keon/algorithms
|
algorithms/iterables/convolved.py
|
convolved_1d
|
def convolved_1d(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""1D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
return convolved(iterable, kernel_size, stride, padding, default_value)
|
python
|
def convolved_1d(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""1D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
return convolved(iterable, kernel_size, stride, padding, default_value)
|
[
"def",
"convolved_1d",
"(",
"iterable",
",",
"kernel_size",
"=",
"1",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"0",
",",
"default_value",
"=",
"None",
")",
":",
"return",
"convolved",
"(",
"iterable",
",",
"kernel_size",
",",
"stride",
",",
"padding",
",",
"default_value",
")"
] |
1D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
|
[
"1D",
"Iterable",
"to",
"get",
"every",
"convolution",
"window",
"per",
"loop",
"iteration",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/iterables/convolved.py#L96-L104
|
21,536
|
keon/algorithms
|
algorithms/iterables/convolved.py
|
convolved_2d
|
def convolved_2d(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""2D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
kernel_size = dimensionize(kernel_size, nd=2)
stride = dimensionize(stride, nd=2)
padding = dimensionize(padding, nd=2)
for row_packet in convolved(iterable, kernel_size[0], stride[0], padding[0], default_value):
transposed_inner = []
for col in tuple(row_packet):
transposed_inner.append(list(
convolved(col, kernel_size[1], stride[1], padding[1], default_value)
))
if len(transposed_inner) > 0:
for col_i in range(len(transposed_inner[0])):
yield tuple(row_j[col_i] for row_j in transposed_inner)
|
python
|
def convolved_2d(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""2D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
kernel_size = dimensionize(kernel_size, nd=2)
stride = dimensionize(stride, nd=2)
padding = dimensionize(padding, nd=2)
for row_packet in convolved(iterable, kernel_size[0], stride[0], padding[0], default_value):
transposed_inner = []
for col in tuple(row_packet):
transposed_inner.append(list(
convolved(col, kernel_size[1], stride[1], padding[1], default_value)
))
if len(transposed_inner) > 0:
for col_i in range(len(transposed_inner[0])):
yield tuple(row_j[col_i] for row_j in transposed_inner)
|
[
"def",
"convolved_2d",
"(",
"iterable",
",",
"kernel_size",
"=",
"1",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"0",
",",
"default_value",
"=",
"None",
")",
":",
"kernel_size",
"=",
"dimensionize",
"(",
"kernel_size",
",",
"nd",
"=",
"2",
")",
"stride",
"=",
"dimensionize",
"(",
"stride",
",",
"nd",
"=",
"2",
")",
"padding",
"=",
"dimensionize",
"(",
"padding",
",",
"nd",
"=",
"2",
")",
"for",
"row_packet",
"in",
"convolved",
"(",
"iterable",
",",
"kernel_size",
"[",
"0",
"]",
",",
"stride",
"[",
"0",
"]",
",",
"padding",
"[",
"0",
"]",
",",
"default_value",
")",
":",
"transposed_inner",
"=",
"[",
"]",
"for",
"col",
"in",
"tuple",
"(",
"row_packet",
")",
":",
"transposed_inner",
".",
"append",
"(",
"list",
"(",
"convolved",
"(",
"col",
",",
"kernel_size",
"[",
"1",
"]",
",",
"stride",
"[",
"1",
"]",
",",
"padding",
"[",
"1",
"]",
",",
"default_value",
")",
")",
")",
"if",
"len",
"(",
"transposed_inner",
")",
">",
"0",
":",
"for",
"col_i",
"in",
"range",
"(",
"len",
"(",
"transposed_inner",
"[",
"0",
"]",
")",
")",
":",
"yield",
"tuple",
"(",
"row_j",
"[",
"col_i",
"]",
"for",
"row_j",
"in",
"transposed_inner",
")"
] |
2D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
|
[
"2D",
"Iterable",
"to",
"get",
"every",
"convolution",
"window",
"per",
"loop",
"iteration",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/iterables/convolved.py#L107-L128
|
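A usage sketch for `convolved_2d` above on a 3x3 grid; each yielded patch is a tuple of rows:

grid = [[1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]]
patches = list(convolved_2d(grid, kernel_size=2))
assert patches[0] == ([1, 2], [4, 5])   # top-left 2x2 patch
assert len(patches) == 4                # four 2x2 patches fit in a 3x3 grid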
21,537
|
keon/algorithms
|
algorithms/iterables/convolved.py
|
dimensionize
|
def dimensionize(maybe_a_list, nd=2):
"""Convert integers to a list of integers to fit the number of dimensions if
the argument is not already a list.
For example:
`dimensionize(3, nd=2)`
will produce the following result:
    `[3, 3]`.
`dimensionize([3, 1], nd=2)`
will produce the following result:
`[3, 1]`.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
if not hasattr(maybe_a_list, '__iter__'):
# Argument is probably an integer so we map it to a list of size `nd`.
now_a_list = [maybe_a_list] * nd
return now_a_list
else:
# Argument is probably an `nd`-sized list.
return maybe_a_list
|
python
|
def dimensionize(maybe_a_list, nd=2):
"""Convert integers to a list of integers to fit the number of dimensions if
the argument is not already a list.
For example:
`dimensionize(3, nd=2)`
will produce the following result:
    `[3, 3]`.
`dimensionize([3, 1], nd=2)`
will produce the following result:
`[3, 1]`.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
if not hasattr(maybe_a_list, '__iter__'):
# Argument is probably an integer so we map it to a list of size `nd`.
now_a_list = [maybe_a_list] * nd
return now_a_list
else:
# Argument is probably an `nd`-sized list.
return maybe_a_list
|
[
"def",
"dimensionize",
"(",
"maybe_a_list",
",",
"nd",
"=",
"2",
")",
":",
"if",
"not",
"hasattr",
"(",
"maybe_a_list",
",",
"'__iter__'",
")",
":",
"# Argument is probably an integer so we map it to a list of size `nd`.",
"now_a_list",
"=",
"[",
"maybe_a_list",
"]",
"*",
"nd",
"return",
"now_a_list",
"else",
":",
"# Argument is probably an `nd`-sized list.",
"return",
"maybe_a_list"
] |
Convert integers to a list of integers to fit the number of dimensions if
the argument is not already a list.
For example:
`dimensionize(3, nd=2)`
will produce the following result:
`[3, 3]`.
`dimensionize([3, 1], nd=2)`
will produce the following result:
`[3, 1]`.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
|
[
"Convert",
"integers",
"to",
"a",
"list",
"of",
"integers",
"to",
"fit",
"the",
"number",
"of",
"dimensions",
"if",
"the",
"argument",
"is",
"not",
"already",
"a",
"list",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/iterables/convolved.py#L131-L154
|
21,538
|
keon/algorithms
|
algorithms/arrays/merge_intervals.py
|
merge_intervals
|
def merge_intervals(intervals):
""" Merge intervals in the form of a list. """
if intervals is None:
return None
intervals.sort(key=lambda i: i[0])
out = [intervals.pop(0)]
for i in intervals:
if out[-1][-1] >= i[0]:
out[-1][-1] = max(out[-1][-1], i[-1])
else:
out.append(i)
return out
|
python
|
def merge_intervals(intervals):
""" Merge intervals in the form of a list. """
if intervals is None:
return None
intervals.sort(key=lambda i: i[0])
out = [intervals.pop(0)]
for i in intervals:
if out[-1][-1] >= i[0]:
out[-1][-1] = max(out[-1][-1], i[-1])
else:
out.append(i)
return out
|
[
"def",
"merge_intervals",
"(",
"intervals",
")",
":",
"if",
"intervals",
"is",
"None",
":",
"return",
"None",
"intervals",
".",
"sort",
"(",
"key",
"=",
"lambda",
"i",
":",
"i",
"[",
"0",
"]",
")",
"out",
"=",
"[",
"intervals",
".",
"pop",
"(",
"0",
")",
"]",
"for",
"i",
"in",
"intervals",
":",
"if",
"out",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
">=",
"i",
"[",
"0",
"]",
":",
"out",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"=",
"max",
"(",
"out",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
",",
"i",
"[",
"-",
"1",
"]",
")",
"else",
":",
"out",
".",
"append",
"(",
"i",
")",
"return",
"out"
] |
Merge intervals in the form of a list.
|
[
"Merge",
"intervals",
"in",
"the",
"form",
"of",
"a",
"list",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/merge_intervals.py#L66-L77
|
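A usage sketch for the list-based `merge_intervals` above (note that it mutates its argument via `sort` and `pop`):

intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]
assert merge_intervals(intervals) == [[1, 6], [8, 10], [15, 18]]
assert merge_intervals(None) is None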
21,539
|
keon/algorithms
|
algorithms/arrays/merge_intervals.py
|
Interval.merge
|
def merge(intervals):
""" Merge two intervals into one. """
out = []
for i in sorted(intervals, key=lambda i: i.start):
if out and i.start <= out[-1].end:
out[-1].end = max(out[-1].end, i.end)
else:
out += i,
return out
|
python
|
def merge(intervals):
""" Merge two intervals into one. """
out = []
for i in sorted(intervals, key=lambda i: i.start):
if out and i.start <= out[-1].end:
out[-1].end = max(out[-1].end, i.end)
else:
out += i,
return out
|
[
"def",
"merge",
"(",
"intervals",
")",
":",
"out",
"=",
"[",
"]",
"for",
"i",
"in",
"sorted",
"(",
"intervals",
",",
"key",
"=",
"lambda",
"i",
":",
"i",
".",
"start",
")",
":",
"if",
"out",
"and",
"i",
".",
"start",
"<=",
"out",
"[",
"-",
"1",
"]",
".",
"end",
":",
"out",
"[",
"-",
"1",
"]",
".",
"end",
"=",
"max",
"(",
"out",
"[",
"-",
"1",
"]",
".",
"end",
",",
"i",
".",
"end",
")",
"else",
":",
"out",
"+=",
"i",
",",
"return",
"out"
] |
Merge two intervals into one.
|
[
"Merge",
"two",
"intervals",
"into",
"one",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/merge_intervals.py#L47-L55
|
21,540
|
keon/algorithms
|
algorithms/arrays/merge_intervals.py
|
Interval.print_intervals
|
def print_intervals(intervals):
""" Print out the intervals. """
res = []
for i in intervals:
res.append(repr(i))
print("".join(res))
|
python
|
def print_intervals(intervals):
""" Print out the intervals. """
res = []
for i in intervals:
res.append(repr(i))
print("".join(res))
|
[
"def",
"print_intervals",
"(",
"intervals",
")",
":",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"intervals",
":",
"res",
".",
"append",
"(",
"repr",
"(",
"i",
")",
")",
"print",
"(",
"\"\"",
".",
"join",
"(",
"res",
")",
")"
] |
Print out the intervals.
|
[
"Print",
"out",
"the",
"intervals",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/merge_intervals.py#L58-L63
|
21,541
|
keon/algorithms
|
algorithms/sort/heap_sort.py
|
max_heapify
|
def max_heapify(arr, end, simulation, iteration):
""" Max heapify helper for max_heap_sort
"""
last_parent = (end - 1) // 2
# Iterate from last parent to first
for parent in range(last_parent, -1, -1):
current_parent = parent
# Iterate from current_parent to last_parent
while current_parent <= last_parent:
# Find greatest child of current_parent
child = 2 * current_parent + 1
if child + 1 <= end and arr[child] < arr[child + 1]:
child = child + 1
# Swap if child is greater than parent
if arr[child] > arr[current_parent]:
arr[current_parent], arr[child] = arr[child], arr[current_parent]
current_parent = child
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
            # If no swap occurred, no need to keep iterating
else:
break
arr[0], arr[end] = arr[end], arr[0]
return iteration
|
python
|
def max_heapify(arr, end, simulation, iteration):
""" Max heapify helper for max_heap_sort
"""
last_parent = (end - 1) // 2
# Iterate from last parent to first
for parent in range(last_parent, -1, -1):
current_parent = parent
# Iterate from current_parent to last_parent
while current_parent <= last_parent:
# Find greatest child of current_parent
child = 2 * current_parent + 1
if child + 1 <= end and arr[child] < arr[child + 1]:
child = child + 1
# Swap if child is greater than parent
if arr[child] > arr[current_parent]:
arr[current_parent], arr[child] = arr[child], arr[current_parent]
current_parent = child
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
            # If no swap occurred, no need to keep iterating
else:
break
arr[0], arr[end] = arr[end], arr[0]
return iteration
|
[
"def",
"max_heapify",
"(",
"arr",
",",
"end",
",",
"simulation",
",",
"iteration",
")",
":",
"last_parent",
"=",
"(",
"end",
"-",
"1",
")",
"//",
"2",
"# Iterate from last parent to first",
"for",
"parent",
"in",
"range",
"(",
"last_parent",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"current_parent",
"=",
"parent",
"# Iterate from current_parent to last_parent",
"while",
"current_parent",
"<=",
"last_parent",
":",
"# Find greatest child of current_parent",
"child",
"=",
"2",
"*",
"current_parent",
"+",
"1",
"if",
"child",
"+",
"1",
"<=",
"end",
"and",
"arr",
"[",
"child",
"]",
"<",
"arr",
"[",
"child",
"+",
"1",
"]",
":",
"child",
"=",
"child",
"+",
"1",
"# Swap if child is greater than parent",
"if",
"arr",
"[",
"child",
"]",
">",
"arr",
"[",
"current_parent",
"]",
":",
"arr",
"[",
"current_parent",
"]",
",",
"arr",
"[",
"child",
"]",
"=",
"arr",
"[",
"child",
"]",
",",
"arr",
"[",
"current_parent",
"]",
"current_parent",
"=",
"child",
"if",
"simulation",
":",
"iteration",
"=",
"iteration",
"+",
"1",
"print",
"(",
"\"iteration\"",
",",
"iteration",
",",
"\":\"",
",",
"*",
"arr",
")",
"# If no swap occured, no need to keep iterating",
"else",
":",
"break",
"arr",
"[",
"0",
"]",
",",
"arr",
"[",
"end",
"]",
"=",
"arr",
"[",
"end",
"]",
",",
"arr",
"[",
"0",
"]",
"return",
"iteration"
] |
Max heapify helper for max_heap_sort
|
[
"Max",
"heapify",
"helper",
"for",
"max_heap_sort"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/heap_sort.py#L18-L45
|
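The repo's `max_heap_sort` driver is not part of this record; the sketch below is an assumption about how the helper is driven. Each call re-heapifies the prefix `arr[0..end]` from scratch and parks the maximum at `arr[end]`, so shrinking `end` one slot per pass sorts the array ascending:

def max_heap_sort_sketch(arr, simulation=False):
    # hypothetical driver, not the repo's implementation
    iteration = 0
    for end in range(len(arr) - 1, 0, -1):
        iteration = max_heapify(arr, end, simulation, iteration)
    return arr

assert max_heap_sort_sketch([5, 1, 8, 3, 2]) == [1, 2, 3, 5, 8]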
21,542
|
keon/algorithms
|
algorithms/sort/heap_sort.py
|
min_heapify
|
def min_heapify(arr, start, simulation, iteration):
""" Min heapify helper for min_heap_sort
"""
# Offset last_parent by the start (last_parent calculated as if start index was 0)
# All array accesses need to be offset by start
end = len(arr) - 1
last_parent = (end - start - 1) // 2
# Iterate from last parent to first
for parent in range(last_parent, -1, -1):
current_parent = parent
# Iterate from current_parent to last_parent
while current_parent <= last_parent:
# Find lesser child of current_parent
child = 2 * current_parent + 1
if child + 1 <= end - start and arr[child + start] > arr[
child + 1 + start]:
child = child + 1
# Swap if child is less than parent
if arr[child + start] < arr[current_parent + start]:
arr[current_parent + start], arr[child + start] = \
arr[child + start], arr[current_parent + start]
current_parent = child
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
            # If no swap occurred, no need to keep iterating
else:
break
return iteration
|
python
|
def min_heapify(arr, start, simulation, iteration):
""" Min heapify helper for min_heap_sort
"""
# Offset last_parent by the start (last_parent calculated as if start index was 0)
# All array accesses need to be offset by start
end = len(arr) - 1
last_parent = (end - start - 1) // 2
# Iterate from last parent to first
for parent in range(last_parent, -1, -1):
current_parent = parent
# Iterate from current_parent to last_parent
while current_parent <= last_parent:
# Find lesser child of current_parent
child = 2 * current_parent + 1
if child + 1 <= end - start and arr[child + start] > arr[
child + 1 + start]:
child = child + 1
# Swap if child is less than parent
if arr[child + start] < arr[current_parent + start]:
arr[current_parent + start], arr[child + start] = \
arr[child + start], arr[current_parent + start]
current_parent = child
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
            # If no swap occurred, no need to keep iterating
else:
break
return iteration
|
[
"def",
"min_heapify",
"(",
"arr",
",",
"start",
",",
"simulation",
",",
"iteration",
")",
":",
"# Offset last_parent by the start (last_parent calculated as if start index was 0)",
"# All array accesses need to be offset by start",
"end",
"=",
"len",
"(",
"arr",
")",
"-",
"1",
"last_parent",
"=",
"(",
"end",
"-",
"start",
"-",
"1",
")",
"//",
"2",
"# Iterate from last parent to first",
"for",
"parent",
"in",
"range",
"(",
"last_parent",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"current_parent",
"=",
"parent",
"# Iterate from current_parent to last_parent",
"while",
"current_parent",
"<=",
"last_parent",
":",
"# Find lesser child of current_parent",
"child",
"=",
"2",
"*",
"current_parent",
"+",
"1",
"if",
"child",
"+",
"1",
"<=",
"end",
"-",
"start",
"and",
"arr",
"[",
"child",
"+",
"start",
"]",
">",
"arr",
"[",
"child",
"+",
"1",
"+",
"start",
"]",
":",
"child",
"=",
"child",
"+",
"1",
"# Swap if child is less than parent",
"if",
"arr",
"[",
"child",
"+",
"start",
"]",
"<",
"arr",
"[",
"current_parent",
"+",
"start",
"]",
":",
"arr",
"[",
"current_parent",
"+",
"start",
"]",
",",
"arr",
"[",
"child",
"+",
"start",
"]",
"=",
"arr",
"[",
"child",
"+",
"start",
"]",
",",
"arr",
"[",
"current_parent",
"+",
"start",
"]",
"current_parent",
"=",
"child",
"if",
"simulation",
":",
"iteration",
"=",
"iteration",
"+",
"1",
"print",
"(",
"\"iteration\"",
",",
"iteration",
",",
"\":\"",
",",
"*",
"arr",
")",
"# If no swap occured, no need to keep iterating",
"else",
":",
"break",
"return",
"iteration"
] |
Min heapify helper for min_heap_sort
|
[
"Min",
"heapify",
"helper",
"for",
"min_heap_sort"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/heap_sort.py#L61-L92
|
21,543
|
keon/algorithms
|
algorithms/maths/rsa.py
|
generate_key
|
def generate_key(k, seed=None):
"""
the RSA key generating algorithm
k is the number of bits in n
"""
def modinv(a, m):
"""calculate the inverse of a mod m
that is, find b such that (a * b) % m == 1"""
b = 1
while not (a * b) % m == 1:
b += 1
return b
def gen_prime(k, seed=None):
"""generate a prime with k bits"""
def is_prime(num):
if num == 2:
return True
for i in range(2, int(num ** 0.5) + 1):
if num % i == 0:
return False
return True
random.seed(seed)
while True:
key = random.randrange(int(2 ** (k - 1)), int(2 ** k))
if is_prime(key):
return key
    # the sizes in bits of p and q need to add up to the size of n
    p_size = k // 2
q_size = k - p_size
e = gen_prime(k, seed) # in many cases, e is also chosen to be a small constant
while True:
p = gen_prime(p_size, seed)
if p % e != 1:
break
while True:
q = gen_prime(q_size, seed)
if q % e != 1:
break
n = p * q
l = (p - 1) * (q - 1) # calculate totient function
d = modinv(e, l)
return int(n), int(e), int(d)
|
python
|
def generate_key(k, seed=None):
"""
the RSA key generating algorithm
k is the number of bits in n
"""
def modinv(a, m):
"""calculate the inverse of a mod m
that is, find b such that (a * b) % m == 1"""
b = 1
while not (a * b) % m == 1:
b += 1
return b
def gen_prime(k, seed=None):
"""generate a prime with k bits"""
def is_prime(num):
if num == 2:
return True
for i in range(2, int(num ** 0.5) + 1):
if num % i == 0:
return False
return True
random.seed(seed)
while True:
key = random.randrange(int(2 ** (k - 1)), int(2 ** k))
if is_prime(key):
return key
    # the sizes in bits of p and q need to add up to the size of n
    p_size = k // 2
q_size = k - p_size
e = gen_prime(k, seed) # in many cases, e is also chosen to be a small constant
while True:
p = gen_prime(p_size, seed)
if p % e != 1:
break
while True:
q = gen_prime(q_size, seed)
if q % e != 1:
break
n = p * q
l = (p - 1) * (q - 1) # calculate totient function
d = modinv(e, l)
return int(n), int(e), int(d)
|
[
"def",
"generate_key",
"(",
"k",
",",
"seed",
"=",
"None",
")",
":",
"def",
"modinv",
"(",
"a",
",",
"m",
")",
":",
"\"\"\"calculate the inverse of a mod m\n that is, find b such that (a * b) % m == 1\"\"\"",
"b",
"=",
"1",
"while",
"not",
"(",
"a",
"*",
"b",
")",
"%",
"m",
"==",
"1",
":",
"b",
"+=",
"1",
"return",
"b",
"def",
"gen_prime",
"(",
"k",
",",
"seed",
"=",
"None",
")",
":",
"\"\"\"generate a prime with k bits\"\"\"",
"def",
"is_prime",
"(",
"num",
")",
":",
"if",
"num",
"==",
"2",
":",
"return",
"True",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"int",
"(",
"num",
"**",
"0.5",
")",
"+",
"1",
")",
":",
"if",
"num",
"%",
"i",
"==",
"0",
":",
"return",
"False",
"return",
"True",
"random",
".",
"seed",
"(",
"seed",
")",
"while",
"True",
":",
"key",
"=",
"random",
".",
"randrange",
"(",
"int",
"(",
"2",
"**",
"(",
"k",
"-",
"1",
")",
")",
",",
"int",
"(",
"2",
"**",
"k",
")",
")",
"if",
"is_prime",
"(",
"key",
")",
":",
"return",
"key",
"# size in bits of p and q need to add up to the size of n",
"p_size",
"=",
"k",
"/",
"2",
"q_size",
"=",
"k",
"-",
"p_size",
"e",
"=",
"gen_prime",
"(",
"k",
",",
"seed",
")",
"# in many cases, e is also chosen to be a small constant",
"while",
"True",
":",
"p",
"=",
"gen_prime",
"(",
"p_size",
",",
"seed",
")",
"if",
"p",
"%",
"e",
"!=",
"1",
":",
"break",
"while",
"True",
":",
"q",
"=",
"gen_prime",
"(",
"q_size",
",",
"seed",
")",
"if",
"q",
"%",
"e",
"!=",
"1",
":",
"break",
"n",
"=",
"p",
"*",
"q",
"l",
"=",
"(",
"p",
"-",
"1",
")",
"*",
"(",
"q",
"-",
"1",
")",
"# calculate totient function",
"d",
"=",
"modinv",
"(",
"e",
",",
"l",
")",
"return",
"int",
"(",
"n",
")",
",",
"int",
"(",
"e",
")",
",",
"int",
"(",
"d",
")"
] |
the RSA key generating algorithm
k is the number of bits in n
|
[
"the",
"RSA",
"key",
"generating",
"algorithm",
"k",
"is",
"the",
"number",
"of",
"bits",
"in",
"n"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/rsa.py#L27-L78
|
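A round-trip sketch for `generate_key` above with a deliberately tiny, insecure key (the brute-force `modinv` makes large `k` impractical). The retry loop is a workaround assumption: this toy generator can occasionally draw `p == q`, in which case decryption is not guaranteed:

import random  # used internally by gen_prime

while True:
    n, e, d = generate_key(16)
    message = 42                          # must be smaller than n
    ciphertext = pow(message, e, n)       # encrypt: m^e mod n
    if pow(ciphertext, d, n) == message:  # decrypt: c^d mod n
        break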
21,544
|
keon/algorithms
|
algorithms/maths/sqrt_precision_factor.py
|
square_root
|
def square_root(n, epsilon=0.001):
"""Return square root of n, with maximum absolute error epsilon"""
guess = n / 2
while abs(guess * guess - n) > epsilon:
guess = (guess + (n / guess)) / 2
return guess
|
python
|
def square_root(n, epsilon=0.001):
"""Return square root of n, with maximum absolute error epsilon"""
guess = n / 2
while abs(guess * guess - n) > epsilon:
guess = (guess + (n / guess)) / 2
return guess
|
[
"def",
"square_root",
"(",
"n",
",",
"epsilon",
"=",
"0.001",
")",
":",
"guess",
"=",
"n",
"/",
"2",
"while",
"abs",
"(",
"guess",
"*",
"guess",
"-",
"n",
")",
">",
"epsilon",
":",
"guess",
"=",
"(",
"guess",
"+",
"(",
"n",
"/",
"guess",
")",
")",
"/",
"2",
"return",
"guess"
] |
Return square root of n, with maximum absolute error epsilon
|
[
"Return",
"square",
"root",
"of",
"n",
"with",
"maximum",
"absolute",
"error",
"epsilon"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/sqrt_precision_factor.py#L12-L19
|
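A small usage sketch (not part of the source record): the loop above is Newton's method, so the guarantee is on |guess * guess - n|, which is exactly what the check below verifies.

root = square_root(2, epsilon=1e-6)
print(root)                          # ~1.41421356
assert abs(root * root - 2) <= 1e-6  # matches the loop's stopping condition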
21,545
|
keon/algorithms
|
algorithms/set/set_covering.py
|
powerset
|
def powerset(iterable):
"""Calculate the powerset of any iterable.
For a range of integers up to the length of the given list,
make all possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
"list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
|
python
|
def powerset(iterable):
"""Calculate the powerset of any iterable.
For a range of integers up to the length of the given list,
make all possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
"list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
|
[
"def",
"powerset",
"(",
"iterable",
")",
":",
"\"list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]\"",
"s",
"=",
"list",
"(",
"iterable",
")",
"return",
"chain",
".",
"from_iterable",
"(",
"combinations",
"(",
"s",
",",
"r",
")",
"for",
"r",
"in",
"range",
"(",
"len",
"(",
"s",
")",
"+",
"1",
")",
")"
] |
Calculate the powerset of any iterable.
For a range of integers up to the length of the given list,
make all possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes
|
[
"Calculate",
"the",
"powerset",
"of",
"any",
"iterable",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/set/set_covering.py#L25-L34
|
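A usage sketch (not part of the source record): the snippet relies on itertools names it does not import itself, so a caller must supply them.

from itertools import chain, combinations

print(list(powerset([1, 2, 3])))
# [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]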
21,546
|
keon/algorithms
|
algorithms/set/set_covering.py
|
greedy_set_cover
|
def greedy_set_cover(universe, subsets, costs):
"""Approximate greedy algorithm for set-covering. Can be used on large
inputs - though not an optimal solution.
Args:
universe (list): Universe of elements
subsets (dict): Subsets of U {S1:elements,S2:elements}
costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}
"""
elements = set(e for s in subsets.keys() for e in subsets[s])
# elements don't cover universe -> invalid input for set cover
if elements != universe:
return None
# track elements of universe covered
covered = set()
cover_sets = []
while covered != universe:
min_cost_elem_ratio = float("inf")
min_set = None
# find set with minimum cost:elements_added ratio
for s, elements in subsets.items():
new_elements = len(elements - covered)
# set may have same elements as already covered -> new_elements = 0
# check to avoid division by 0 error
if new_elements != 0:
cost_elem_ratio = costs[s] / new_elements
if cost_elem_ratio < min_cost_elem_ratio:
min_cost_elem_ratio = cost_elem_ratio
min_set = s
cover_sets.append(min_set)
# union
covered |= subsets[min_set]
return cover_sets
|
python
|
def greedy_set_cover(universe, subsets, costs):
"""Approximate greedy algorithm for set-covering. Can be used on large
inputs - though not an optimal solution.
Args:
universe (list): Universe of elements
subsets (dict): Subsets of U {S1:elements,S2:elements}
costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}
"""
elements = set(e for s in subsets.keys() for e in subsets[s])
# elements don't cover universe -> invalid input for set cover
if elements != universe:
return None
# track elements of universe covered
covered = set()
cover_sets = []
while covered != universe:
min_cost_elem_ratio = float("inf")
min_set = None
# find set with minimum cost:elements_added ratio
for s, elements in subsets.items():
new_elements = len(elements - covered)
# set may have same elements as already covered -> new_elements = 0
# check to avoid division by 0 error
if new_elements != 0:
cost_elem_ratio = costs[s] / new_elements
if cost_elem_ratio < min_cost_elem_ratio:
min_cost_elem_ratio = cost_elem_ratio
min_set = s
cover_sets.append(min_set)
# union
covered |= subsets[min_set]
return cover_sets
|
[
"def",
"greedy_set_cover",
"(",
"universe",
",",
"subsets",
",",
"costs",
")",
":",
"elements",
"=",
"set",
"(",
"e",
"for",
"s",
"in",
"subsets",
".",
"keys",
"(",
")",
"for",
"e",
"in",
"subsets",
"[",
"s",
"]",
")",
"# elements don't cover universe -> invalid input for set cover",
"if",
"elements",
"!=",
"universe",
":",
"return",
"None",
"# track elements of universe covered",
"covered",
"=",
"set",
"(",
")",
"cover_sets",
"=",
"[",
"]",
"while",
"covered",
"!=",
"universe",
":",
"min_cost_elem_ratio",
"=",
"float",
"(",
"\"inf\"",
")",
"min_set",
"=",
"None",
"# find set with minimum cost:elements_added ratio",
"for",
"s",
",",
"elements",
"in",
"subsets",
".",
"items",
"(",
")",
":",
"new_elements",
"=",
"len",
"(",
"elements",
"-",
"covered",
")",
"# set may have same elements as already covered -> new_elements = 0",
"# check to avoid division by 0 error",
"if",
"new_elements",
"!=",
"0",
":",
"cost_elem_ratio",
"=",
"costs",
"[",
"s",
"]",
"/",
"new_elements",
"if",
"cost_elem_ratio",
"<",
"min_cost_elem_ratio",
":",
"min_cost_elem_ratio",
"=",
"cost_elem_ratio",
"min_set",
"=",
"s",
"cover_sets",
".",
"append",
"(",
"min_set",
")",
"# union",
"covered",
"|=",
"subsets",
"[",
"min_set",
"]",
"return",
"cover_sets"
] |
Approximate greedy algorithm for set-covering. Can be used on large
inputs - though not an optimal solution.
Args:
universe (list): Universe of elements
subsets (dict): Subsets of U {S1:elements,S2:elements}
costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}
|
[
"Approximate",
"greedy",
"algorithm",
"for",
"set",
"-",
"covering",
".",
"Can",
"be",
"used",
"on",
"large",
"inputs",
"-",
"though",
"not",
"an",
"optimal",
"solution",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/set/set_covering.py#L61-L95
|
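A usage sketch (not part of the source record): despite the docstring saying list, the code compares elements != universe against a set and does set arithmetic on the subset values, so both should be passed as sets.

universe = {1, 2, 3, 4, 5}
subsets = {'S1': {1, 3, 4}, 'S2': {2, 5}, 'S3': {1, 2, 3, 4}}
costs = {'S1': 5, 'S2': 10, 'S3': 3}
print(greedy_set_cover(universe, subsets, costs))  # ['S3', 'S2']: best cost per new element first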
21,547
|
keon/algorithms
|
algorithms/tree/avl/avl.py
|
AvlTree.insert
|
def insert(self, key):
"""
Insert new key into node
"""
# Create new node
n = TreeNode(key)
if not self.node:
self.node = n
self.node.left = AvlTree()
self.node.right = AvlTree()
elif key < self.node.val:
self.node.left.insert(key)
elif key > self.node.val:
self.node.right.insert(key)
self.re_balance()
|
python
|
def insert(self, key):
"""
Insert new key into node
"""
# Create new node
n = TreeNode(key)
if not self.node:
self.node = n
self.node.left = AvlTree()
self.node.right = AvlTree()
elif key < self.node.val:
self.node.left.insert(key)
elif key > self.node.val:
self.node.right.insert(key)
self.re_balance()
|
[
"def",
"insert",
"(",
"self",
",",
"key",
")",
":",
"# Create new node",
"n",
"=",
"TreeNode",
"(",
"key",
")",
"if",
"not",
"self",
".",
"node",
":",
"self",
".",
"node",
"=",
"n",
"self",
".",
"node",
".",
"left",
"=",
"AvlTree",
"(",
")",
"self",
".",
"node",
".",
"right",
"=",
"AvlTree",
"(",
")",
"elif",
"key",
"<",
"self",
".",
"node",
".",
"val",
":",
"self",
".",
"node",
".",
"left",
".",
"insert",
"(",
"key",
")",
"elif",
"key",
">",
"self",
".",
"node",
".",
"val",
":",
"self",
".",
"node",
".",
"right",
".",
"insert",
"(",
"key",
")",
"self",
".",
"re_balance",
"(",
")"
] |
Insert new key into node
|
[
"Insert",
"new",
"key",
"into",
"node"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L15-L29
|
21,548
|
keon/algorithms
|
algorithms/tree/avl/avl.py
|
AvlTree.re_balance
|
def re_balance(self):
"""
        Re-balance the tree after inserting or deleting a node.
"""
self.update_heights(recursive=False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.rotate_left()
self.update_heights()
self.update_balances()
self.rotate_right()
self.update_heights()
self.update_balances()
if self.balance < -1:
if self.node.right.balance > 0:
self.node.right.rotate_right()
self.update_heights()
self.update_balances()
self.rotate_left()
self.update_heights()
self.update_balances()
|
python
|
def re_balance(self):
"""
        Re-balance the tree after inserting or deleting a node.
"""
self.update_heights(recursive=False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.rotate_left()
self.update_heights()
self.update_balances()
self.rotate_right()
self.update_heights()
self.update_balances()
if self.balance < -1:
if self.node.right.balance > 0:
self.node.right.rotate_right()
self.update_heights()
self.update_balances()
self.rotate_left()
self.update_heights()
self.update_balances()
|
[
"def",
"re_balance",
"(",
"self",
")",
":",
"self",
".",
"update_heights",
"(",
"recursive",
"=",
"False",
")",
"self",
".",
"update_balances",
"(",
"False",
")",
"while",
"self",
".",
"balance",
"<",
"-",
"1",
"or",
"self",
".",
"balance",
">",
"1",
":",
"if",
"self",
".",
"balance",
">",
"1",
":",
"if",
"self",
".",
"node",
".",
"left",
".",
"balance",
"<",
"0",
":",
"self",
".",
"node",
".",
"left",
".",
"rotate_left",
"(",
")",
"self",
".",
"update_heights",
"(",
")",
"self",
".",
"update_balances",
"(",
")",
"self",
".",
"rotate_right",
"(",
")",
"self",
".",
"update_heights",
"(",
")",
"self",
".",
"update_balances",
"(",
")",
"if",
"self",
".",
"balance",
"<",
"-",
"1",
":",
"if",
"self",
".",
"node",
".",
"right",
".",
"balance",
">",
"0",
":",
"self",
".",
"node",
".",
"right",
".",
"rotate_right",
"(",
")",
"self",
".",
"update_heights",
"(",
")",
"self",
".",
"update_balances",
"(",
")",
"self",
".",
"rotate_left",
"(",
")",
"self",
".",
"update_heights",
"(",
")",
"self",
".",
"update_balances",
"(",
")"
] |
Re-balance the tree after inserting or deleting a node.
|
[
"Re",
"-",
"balance",
"the",
"tree",
"after",
"inserting",
"or",
"deleting",
"a",
"node",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L31-L55
|
21,549
|
keon/algorithms
|
algorithms/tree/avl/avl.py
|
AvlTree.update_heights
|
def update_heights(self, recursive=True):
"""
Update tree height
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_heights()
if self.node.right:
self.node.right.update_heights()
self.height = 1 + max(self.node.left.height, self.node.right.height)
else:
self.height = -1
|
python
|
def update_heights(self, recursive=True):
"""
Update tree height
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_heights()
if self.node.right:
self.node.right.update_heights()
self.height = 1 + max(self.node.left.height, self.node.right.height)
else:
self.height = -1
|
[
"def",
"update_heights",
"(",
"self",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"self",
".",
"node",
":",
"if",
"recursive",
":",
"if",
"self",
".",
"node",
".",
"left",
":",
"self",
".",
"node",
".",
"left",
".",
"update_heights",
"(",
")",
"if",
"self",
".",
"node",
".",
"right",
":",
"self",
".",
"node",
".",
"right",
".",
"update_heights",
"(",
")",
"self",
".",
"height",
"=",
"1",
"+",
"max",
"(",
"self",
".",
"node",
".",
"left",
".",
"height",
",",
"self",
".",
"node",
".",
"right",
".",
"height",
")",
"else",
":",
"self",
".",
"height",
"=",
"-",
"1"
] |
Update tree height
|
[
"Update",
"tree",
"height"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L57-L70
|
21,550
|
keon/algorithms
|
algorithms/tree/avl/avl.py
|
AvlTree.update_balances
|
def update_balances(self, recursive=True):
"""
Calculate tree balance factor
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_balances()
if self.node.right:
self.node.right.update_balances()
self.balance = self.node.left.height - self.node.right.height
else:
self.balance = 0
|
python
|
def update_balances(self, recursive=True):
"""
Calculate tree balance factor
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_balances()
if self.node.right:
self.node.right.update_balances()
self.balance = self.node.left.height - self.node.right.height
else:
self.balance = 0
|
[
"def",
"update_balances",
"(",
"self",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"self",
".",
"node",
":",
"if",
"recursive",
":",
"if",
"self",
".",
"node",
".",
"left",
":",
"self",
".",
"node",
".",
"left",
".",
"update_balances",
"(",
")",
"if",
"self",
".",
"node",
".",
"right",
":",
"self",
".",
"node",
".",
"right",
".",
"update_balances",
"(",
")",
"self",
".",
"balance",
"=",
"self",
".",
"node",
".",
"left",
".",
"height",
"-",
"self",
".",
"node",
".",
"right",
".",
"height",
"else",
":",
"self",
".",
"balance",
"=",
"0"
] |
Calculate tree balance factor
|
[
"Calculate",
"tree",
"balance",
"factor"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L72-L86
|
21,551
|
keon/algorithms
|
algorithms/tree/avl/avl.py
|
AvlTree.in_order_traverse
|
def in_order_traverse(self):
"""
In-order traversal of the tree
"""
result = []
if not self.node:
return result
result.extend(self.node.left.in_order_traverse())
result.append(self.node.key)
result.extend(self.node.right.in_order_traverse())
return result
|
python
|
def in_order_traverse(self):
"""
In-order traversal of the tree
"""
result = []
if not self.node:
return result
result.extend(self.node.left.in_order_traverse())
result.append(self.node.key)
result.extend(self.node.right.in_order_traverse())
return result
|
[
"def",
"in_order_traverse",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"node",
":",
"return",
"result",
"result",
".",
"extend",
"(",
"self",
".",
"node",
".",
"left",
".",
"in_order_traverse",
"(",
")",
")",
"result",
".",
"append",
"(",
"self",
".",
"node",
".",
"key",
")",
"result",
".",
"extend",
"(",
"self",
".",
"node",
".",
"right",
".",
"in_order_traverse",
"(",
")",
")",
"return",
"result"
] |
In-order traversal of the tree
|
[
"In",
"-",
"order",
"traversal",
"of",
"the",
"tree"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L112-L124
|
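A usage sketch for the AvlTree methods in the records above (not part of the source), assuming the rest of the class from avl.py (constructor, TreeNode, and the rotate helpers) is in scope. Note that insert compares self.node.val while in_order_traverse reads self.node.key, so the TreeNode used must expose the value under both names for this to run as written.

tree = AvlTree()
for key in [5, 3, 8, 1, 4]:
    tree.insert(key)             # re_balance keeps every balance factor in {-1, 0, 1}
print(tree.in_order_traverse())  # expected: [1, 3, 4, 5, 8]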
21,552
|
keon/algorithms
|
algorithms/linkedlist/kth_to_last.py
|
kth_to_last_dict
|
def kth_to_last_dict(head, k):
"""
This is a brute force method where we keep a dict the size of the list
Then we check it for the value we need. If the key is not in the dict,
our and statement will short circuit and return False
"""
if not (head and k > -1):
return False
d = dict()
count = 0
while head:
d[count] = head
head = head.next
count += 1
return len(d)-k in d and d[len(d)-k]
|
python
|
def kth_to_last_dict(head, k):
"""
This is a brute force method where we keep a dict the size of the list
Then we check it for the value we need. If the key is not in the dict,
our and statement will short circuit and return False
"""
if not (head and k > -1):
return False
d = dict()
count = 0
while head:
d[count] = head
head = head.next
count += 1
return len(d)-k in d and d[len(d)-k]
|
[
"def",
"kth_to_last_dict",
"(",
"head",
",",
"k",
")",
":",
"if",
"not",
"(",
"head",
"and",
"k",
">",
"-",
"1",
")",
":",
"return",
"False",
"d",
"=",
"dict",
"(",
")",
"count",
"=",
"0",
"while",
"head",
":",
"d",
"[",
"count",
"]",
"=",
"head",
"head",
"=",
"head",
".",
"next",
"count",
"+=",
"1",
"return",
"len",
"(",
"d",
")",
"-",
"k",
"in",
"d",
"and",
"d",
"[",
"len",
"(",
"d",
")",
"-",
"k",
"]"
] |
This is a brute force method where we keep a dict the size of the list
Then we check it for the value we need. If the key is not in the dict,
our and statement will short circuit and return False
|
[
"This",
"is",
"a",
"brute",
"force",
"method",
"where",
"we",
"keep",
"a",
"dict",
"the",
"size",
"of",
"the",
"list",
"Then",
"we",
"check",
"it",
"for",
"the",
"value",
"we",
"need",
".",
"If",
"the",
"key",
"is",
"not",
"in",
"the",
"dict",
"our",
"and",
"statement",
"will",
"short",
"circuit",
"and",
"return",
"False"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/linkedlist/kth_to_last.py#L27-L41
|
21,553
|
keon/algorithms
|
algorithms/linkedlist/kth_to_last.py
|
kth_to_last
|
def kth_to_last(head, k):
"""
This is an optimal method using iteration.
We move p1 k steps ahead into the list.
Then we move p1 and p2 together until p1 hits the end.
"""
    if not (head and k > -1):
return False
p1 = head
p2 = head
for i in range(1, k+1):
if p1 is None:
# Went too far, k is not valid
raise IndexError
p1 = p1.next
while p1:
p1 = p1.next
p2 = p2.next
return p2
|
python
|
def kth_to_last(head, k):
"""
This is an optimal method using iteration.
We move p1 k steps ahead into the list.
Then we move p1 and p2 together until p1 hits the end.
"""
    if not (head and k > -1):
return False
p1 = head
p2 = head
for i in range(1, k+1):
if p1 is None:
# Went too far, k is not valid
raise IndexError
p1 = p1.next
while p1:
p1 = p1.next
p2 = p2.next
return p2
|
[
"def",
"kth_to_last",
"(",
"head",
",",
"k",
")",
":",
"if",
"not",
"(",
"head",
"and",
"k",
">",
"-",
"1",
")",
":",
"return",
"False",
"p1",
"=",
"head",
"p2",
"=",
"head",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"k",
"+",
"1",
")",
":",
"if",
"p1",
"is",
"None",
":",
"# Went too far, k is not valid",
"raise",
"IndexError",
"p1",
"=",
"p1",
".",
"next",
"while",
"p1",
":",
"p1",
"=",
"p1",
".",
"next",
"p2",
"=",
"p2",
".",
"next",
"return",
"p2"
] |
This is an optimal method using iteration.
We move p1 k steps ahead into the list.
Then we move p1 and p2 together until p1 hits the end.
|
[
"This",
"is",
"an",
"optimal",
"method",
"using",
"iteration",
".",
"We",
"move",
"p1",
"k",
"steps",
"ahead",
"into",
"the",
"list",
".",
"Then",
"we",
"move",
"p1",
"and",
"p2",
"together",
"until",
"p1",
"hits",
"the",
"end",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/linkedlist/kth_to_last.py#L44-L62
|
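A usage sketch (not part of the source record) with a minimal stand-in node class, since the records do not include the module's list node definition.

class Node:
    """Minimal singly linked list node for the demo."""
    def __init__(self, val):
        self.val = val
        self.next = None

head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)

print(kth_to_last(head, 2).val)       # 3: the 2nd node from the end
print(kth_to_last_dict(head, 2).val)  # 3: same answer from the dict-based version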
21,554
|
keon/algorithms
|
algorithms/maths/combination.py
|
combination
|
def combination(n, r):
"""This function calculates nCr."""
if n == r or r == 0:
return 1
else:
return combination(n-1, r-1) + combination(n-1, r)
|
python
|
def combination(n, r):
"""This function calculates nCr."""
if n == r or r == 0:
return 1
else:
return combination(n-1, r-1) + combination(n-1, r)
|
[
"def",
"combination",
"(",
"n",
",",
"r",
")",
":",
"if",
"n",
"==",
"r",
"or",
"r",
"==",
"0",
":",
"return",
"1",
"else",
":",
"return",
"combination",
"(",
"n",
"-",
"1",
",",
"r",
"-",
"1",
")",
"+",
"combination",
"(",
"n",
"-",
"1",
",",
"r",
")"
] |
This function calculates nCr.
|
[
"This",
"function",
"calculates",
"nCr",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/combination.py#L1-L6
|
21,555
|
keon/algorithms
|
algorithms/maths/combination.py
|
combination_memo
|
def combination_memo(n, r):
"""This function calculates nCr using memoization method."""
memo = {}
def recur(n, r):
if n == r or r == 0:
return 1
if (n, r) not in memo:
memo[(n, r)] = recur(n - 1, r - 1) + recur(n - 1, r)
return memo[(n, r)]
return recur(n, r)
|
python
|
def combination_memo(n, r):
"""This function calculates nCr using memoization method."""
memo = {}
def recur(n, r):
if n == r or r == 0:
return 1
if (n, r) not in memo:
memo[(n, r)] = recur(n - 1, r - 1) + recur(n - 1, r)
return memo[(n, r)]
return recur(n, r)
|
[
"def",
"combination_memo",
"(",
"n",
",",
"r",
")",
":",
"memo",
"=",
"{",
"}",
"def",
"recur",
"(",
"n",
",",
"r",
")",
":",
"if",
"n",
"==",
"r",
"or",
"r",
"==",
"0",
":",
"return",
"1",
"if",
"(",
"n",
",",
"r",
")",
"not",
"in",
"memo",
":",
"memo",
"[",
"(",
"n",
",",
"r",
")",
"]",
"=",
"recur",
"(",
"n",
"-",
"1",
",",
"r",
"-",
"1",
")",
"+",
"recur",
"(",
"n",
"-",
"1",
",",
"r",
")",
"return",
"memo",
"[",
"(",
"n",
",",
"r",
")",
"]",
"return",
"recur",
"(",
"n",
",",
"r",
")"
] |
This function calculates nCr using memoization method.
|
[
"This",
"function",
"calculates",
"nCr",
"using",
"memoization",
"method",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/combination.py#L8-L17
|
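A quick comparison sketch (not part of the source records): both functions compute the same binomial coefficient, but the memoized version turns the exponential recursion into O(n * r) subproblems.

print(combination(10, 3))        # 120, fine at small sizes
print(combination_memo(30, 12))  # 86493225; the plain recursion needs millions of calls here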
21,556
|
keon/algorithms
|
algorithms/sort/pancake_sort.py
|
pancake_sort
|
def pancake_sort(arr):
"""
Pancake_sort
Sorting a given array
    a variation of selection sort
reference: https://www.geeksforgeeks.org/pancake-sorting/
Overall time complexity : O(N^2)
"""
len_arr = len(arr)
if len_arr <= 1:
return arr
for cur in range(len(arr), 1, -1):
#Finding index of maximum number in arr
index_max = arr.index(max(arr[0:cur]))
if index_max+1 != cur:
#Needs moving
if index_max != 0:
#reverse from 0 to index_max
arr[:index_max+1] = reversed(arr[:index_max+1])
# Reverse list
arr[:cur] = reversed(arr[:cur])
return arr
|
python
|
def pancake_sort(arr):
"""
Pancake_sort
Sorting a given array
    a variation of selection sort
reference: https://www.geeksforgeeks.org/pancake-sorting/
Overall time complexity : O(N^2)
"""
len_arr = len(arr)
if len_arr <= 1:
return arr
for cur in range(len(arr), 1, -1):
#Finding index of maximum number in arr
index_max = arr.index(max(arr[0:cur]))
if index_max+1 != cur:
#Needs moving
if index_max != 0:
#reverse from 0 to index_max
arr[:index_max+1] = reversed(arr[:index_max+1])
# Reverse list
arr[:cur] = reversed(arr[:cur])
return arr
|
[
"def",
"pancake_sort",
"(",
"arr",
")",
":",
"len_arr",
"=",
"len",
"(",
"arr",
")",
"if",
"len_arr",
"<=",
"1",
":",
"return",
"arr",
"for",
"cur",
"in",
"range",
"(",
"len",
"(",
"arr",
")",
",",
"1",
",",
"-",
"1",
")",
":",
"#Finding index of maximum number in arr",
"index_max",
"=",
"arr",
".",
"index",
"(",
"max",
"(",
"arr",
"[",
"0",
":",
"cur",
"]",
")",
")",
"if",
"index_max",
"+",
"1",
"!=",
"cur",
":",
"#Needs moving",
"if",
"index_max",
"!=",
"0",
":",
"#reverse from 0 to index_max",
"arr",
"[",
":",
"index_max",
"+",
"1",
"]",
"=",
"reversed",
"(",
"arr",
"[",
":",
"index_max",
"+",
"1",
"]",
")",
"# Reverse list",
"arr",
"[",
":",
"cur",
"]",
"=",
"reversed",
"(",
"arr",
"[",
":",
"cur",
"]",
")",
"return",
"arr"
] |
Pancake_sort
Sorting a given array
a variation of selection sort
reference: https://www.geeksforgeeks.org/pancake-sorting/
Overall time complexity : O(N^2)
|
[
"Pancake_sort",
"Sorting",
"a",
"given",
"array",
"a",
"variation",
"of",
"selection",
"sort"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/pancake_sort.py#L1-L25
|
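A usage sketch (not part of the source record): each pass flips the unsorted prefix so its maximum lands at the end, like selection sort done with prefix reversals.

print(pancake_sort([3, 6, 1, 10, 2]))  # [1, 2, 3, 6, 10]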
21,557
|
keon/algorithms
|
algorithms/graph/satisfiability.py
|
scc
|
def scc(graph):
''' Computes the strongly connected components of a graph '''
order = []
vis = {vertex: False for vertex in graph}
graph_transposed = {vertex: [] for vertex in graph}
    for (v, neighbours) in graph.items():
for u in neighbours:
add_edge(graph_transposed, u, v)
for v in graph:
if not vis[v]:
dfs_transposed(v, graph_transposed, order, vis)
vis = {vertex: False for vertex in graph}
vertex_scc = {}
current_comp = 0
for v in reversed(order):
if not vis[v]:
# Each dfs will visit exactly one component
dfs(v, current_comp, vertex_scc, graph, vis)
current_comp += 1
return vertex_scc
|
python
|
def scc(graph):
''' Computes the strongly connected components of a graph '''
order = []
vis = {vertex: False for vertex in graph}
graph_transposed = {vertex: [] for vertex in graph}
    for (v, neighbours) in graph.items():
for u in neighbours:
add_edge(graph_transposed, u, v)
for v in graph:
if not vis[v]:
dfs_transposed(v, graph_transposed, order, vis)
vis = {vertex: False for vertex in graph}
vertex_scc = {}
current_comp = 0
for v in reversed(order):
if not vis[v]:
# Each dfs will visit exactly one component
dfs(v, current_comp, vertex_scc, graph, vis)
current_comp += 1
return vertex_scc
|
[
"def",
"scc",
"(",
"graph",
")",
":",
"order",
"=",
"[",
"]",
"vis",
"=",
"{",
"vertex",
":",
"False",
"for",
"vertex",
"in",
"graph",
"}",
"graph_transposed",
"=",
"{",
"vertex",
":",
"[",
"]",
"for",
"vertex",
"in",
"graph",
"}",
"for",
"(",
"v",
",",
"neighbours",
")",
"in",
"graph",
".",
"iteritems",
"(",
")",
":",
"for",
"u",
"in",
"neighbours",
":",
"add_edge",
"(",
"graph_transposed",
",",
"u",
",",
"v",
")",
"for",
"v",
"in",
"graph",
":",
"if",
"not",
"vis",
"[",
"v",
"]",
":",
"dfs_transposed",
"(",
"v",
",",
"graph_transposed",
",",
"order",
",",
"vis",
")",
"vis",
"=",
"{",
"vertex",
":",
"False",
"for",
"vertex",
"in",
"graph",
"}",
"vertex_scc",
"=",
"{",
"}",
"current_comp",
"=",
"0",
"for",
"v",
"in",
"reversed",
"(",
"order",
")",
":",
"if",
"not",
"vis",
"[",
"v",
"]",
":",
"# Each dfs will visit exactly one component",
"dfs",
"(",
"v",
",",
"current_comp",
",",
"vertex_scc",
",",
"graph",
",",
"vis",
")",
"current_comp",
"+=",
"1",
"return",
"vertex_scc"
] |
Computes the strongly connected components of a graph
|
[
"Computes",
"the",
"strongly",
"connected",
"components",
"of",
"a",
"graph"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/graph/satisfiability.py#L49-L74
|
21,558
|
keon/algorithms
|
algorithms/graph/satisfiability.py
|
build_graph
|
def build_graph(formula):
''' Builds the implication graph from the formula '''
graph = {}
for clause in formula:
for (lit, _) in clause:
for neg in [False, True]:
graph[(lit, neg)] = []
for ((a_lit, a_neg), (b_lit, b_neg)) in formula:
add_edge(graph, (a_lit, a_neg), (b_lit, not b_neg))
add_edge(graph, (b_lit, b_neg), (a_lit, not a_neg))
return graph
|
python
|
def build_graph(formula):
''' Builds the implication graph from the formula '''
graph = {}
for clause in formula:
for (lit, _) in clause:
for neg in [False, True]:
graph[(lit, neg)] = []
for ((a_lit, a_neg), (b_lit, b_neg)) in formula:
add_edge(graph, (a_lit, a_neg), (b_lit, not b_neg))
add_edge(graph, (b_lit, b_neg), (a_lit, not a_neg))
return graph
|
[
"def",
"build_graph",
"(",
"formula",
")",
":",
"graph",
"=",
"{",
"}",
"for",
"clause",
"in",
"formula",
":",
"for",
"(",
"lit",
",",
"_",
")",
"in",
"clause",
":",
"for",
"neg",
"in",
"[",
"False",
",",
"True",
"]",
":",
"graph",
"[",
"(",
"lit",
",",
"neg",
")",
"]",
"=",
"[",
"]",
"for",
"(",
"(",
"a_lit",
",",
"a_neg",
")",
",",
"(",
"b_lit",
",",
"b_neg",
")",
")",
"in",
"formula",
":",
"add_edge",
"(",
"graph",
",",
"(",
"a_lit",
",",
"a_neg",
")",
",",
"(",
"b_lit",
",",
"not",
"b_neg",
")",
")",
"add_edge",
"(",
"graph",
",",
"(",
"b_lit",
",",
"b_neg",
")",
",",
"(",
"a_lit",
",",
"not",
"a_neg",
")",
")",
"return",
"graph"
] |
Builds the implication graph from the formula
|
[
"Builds",
"the",
"implication",
"graph",
"from",
"the",
"formula"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/graph/satisfiability.py#L77-L90
|
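A small sketch (not part of the source record) of feeding a 2-CNF formula to build_graph. The module's add_edge helper is not shown in these records, so a minimal stand-in with the obvious adjacency-list behaviour is assumed here.

def add_edge(graph, u, v):
    """Assumed helper: directed edge u -> v in an adjacency-list dict."""
    graph[u].append(v)

# (x OR y) AND (NOT x OR y), each literal as a (name, negated) pair
formula = [(('x', False), ('y', False)),
           (('x', True), ('y', False))]
graph = build_graph(formula)
print(graph[('x', True)])  # one adjacency list of the implication graph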
21,559
|
keon/algorithms
|
algorithms/tree/is_balanced.py
|
__get_depth
|
def __get_depth(root):
"""
    return -1 if unbalanced else depth + 1
"""
if root is None:
return 0
left = __get_depth(root.left)
right = __get_depth(root.right)
if abs(left-right) > 1 or -1 in [left, right]:
return -1
return 1 + max(left, right)
|
python
|
def __get_depth(root):
"""
    return -1 if unbalanced else depth + 1
"""
if root is None:
return 0
left = __get_depth(root.left)
right = __get_depth(root.right)
if abs(left-right) > 1 or -1 in [left, right]:
return -1
return 1 + max(left, right)
|
[
"def",
"__get_depth",
"(",
"root",
")",
":",
"if",
"root",
"is",
"None",
":",
"return",
"0",
"left",
"=",
"__get_depth",
"(",
"root",
".",
"left",
")",
"right",
"=",
"__get_depth",
"(",
"root",
".",
"right",
")",
"if",
"abs",
"(",
"left",
"-",
"right",
")",
">",
"1",
"or",
"-",
"1",
"in",
"[",
"left",
",",
"right",
"]",
":",
"return",
"-",
"1",
"return",
"1",
"+",
"max",
"(",
"left",
",",
"right",
")"
] |
return -1 if unbalanced else depth + 1
|
[
"return",
"-",
"1",
"if",
"unbalanced",
"else",
"depth",
"+",
"1"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/is_balanced.py#L12-L22
|
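A sketch (not part of the source record) of the wrapper this helper is built for, plus a minimal stand-in node; is_balanced.py wraps the helper the same way, returning True exactly when no subtree reports -1.

class Node:
    """Minimal stand-in binary tree node for the demo."""
    def __init__(self, left=None, right=None):
        self.left = left
        self.right = right

def is_balanced(root):
    return __get_depth(root) != -1

print(is_balanced(Node(Node(), Node())))      # True: both subtrees have depth 1
print(is_balanced(Node(Node(Node(Node())))))  # False: a left chain of depth 3 under the root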
21,560
|
keon/algorithms
|
algorithms/backtrack/palindrome_partitioning.py
|
palindromic_substrings_iter
|
def palindromic_substrings_iter(s):
"""
A slightly more Pythonic approach with a recursive generator
"""
if not s:
yield []
return
for i in range(len(s), 0, -1):
sub = s[:i]
if sub == sub[::-1]:
for rest in palindromic_substrings_iter(s[i:]):
yield [sub] + rest
|
python
|
def palindromic_substrings_iter(s):
"""
A slightly more Pythonic approach with a recursive generator
"""
if not s:
yield []
return
for i in range(len(s), 0, -1):
sub = s[:i]
if sub == sub[::-1]:
for rest in palindromic_substrings_iter(s[i:]):
yield [sub] + rest
|
[
"def",
"palindromic_substrings_iter",
"(",
"s",
")",
":",
"if",
"not",
"s",
":",
"yield",
"[",
"]",
"return",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"s",
")",
",",
"0",
",",
"-",
"1",
")",
":",
"sub",
"=",
"s",
"[",
":",
"i",
"]",
"if",
"sub",
"==",
"sub",
"[",
":",
":",
"-",
"1",
"]",
":",
"for",
"rest",
"in",
"palindromic_substrings_iter",
"(",
"s",
"[",
"i",
":",
"]",
")",
":",
"yield",
"[",
"sub",
"]",
"+",
"rest"
] |
A slightly more Pythonic approach with a recursive generator
|
[
"A",
"slightly",
"more",
"Pythonic",
"approach",
"with",
"a",
"recursive",
"generator"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/backtrack/palindrome_partitioning.py#L34-L45
|
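A usage sketch (not part of the source record): each yielded list is one way to split the input into palindromic pieces, longest first piece first.

print(list(palindromic_substrings_iter("aab")))
# [['aa', 'b'], ['a', 'a', 'b']]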
21,561
|
keon/algorithms
|
algorithms/calculator/math_parser.py
|
main
|
def main():
"""
simple user-interface
"""
print("\t\tCalculator\n\n")
while True:
user_input = input("expression or exit: ")
if user_input == "exit":
break
try:
print("The result is {0}".format(evaluate(user_input)))
except Exception:
print("invalid syntax!")
print("program end")
|
python
|
def main():
"""
simple user-interface
"""
print("\t\tCalculator\n\n")
while True:
user_input = input("expression or exit: ")
if user_input == "exit":
break
try:
print("The result is {0}".format(evaluate(user_input)))
except Exception:
print("invalid syntax!")
print("program end")
|
[
"def",
"main",
"(",
")",
":",
"print",
"(",
"\"\\t\\tCalculator\\n\\n\"",
")",
"while",
"True",
":",
"user_input",
"=",
"input",
"(",
"\"expression or exit: \"",
")",
"if",
"user_input",
"==",
"\"exit\"",
":",
"break",
"try",
":",
"print",
"(",
"\"The result is {0}\"",
".",
"format",
"(",
"evaluate",
"(",
"user_input",
")",
")",
")",
"except",
"Exception",
":",
"print",
"(",
"\"invalid syntax!\"",
")",
"user_input",
"=",
"input",
"(",
"\"expression or exit: \"",
")",
"print",
"(",
"\"program end\"",
")"
] |
simple user-interface
|
[
"simple",
"user",
"-",
"interface"
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/calculator/math_parser.py#L131-L145
|
21,562
|
keon/algorithms
|
algorithms/maths/primes_sieve_of_eratosthenes.py
|
get_primes
|
def get_primes(n):
"""Return list of all primes less than n,
Using sieve of Eratosthenes.
"""
if n <= 0:
raise ValueError("'n' must be a positive integer.")
# If x is even, exclude x from list (-1):
sieve_size = (n // 2 - 1) if n % 2 == 0 else (n // 2)
sieve = [True for _ in range(sieve_size)] # Sieve
primes = [] # List of Primes
if n >= 2:
primes.append(2) # 2 is prime by default
for i in range(sieve_size):
if sieve[i]:
value_at_i = i*2 + 3
primes.append(value_at_i)
for j in range(i, sieve_size, value_at_i):
sieve[j] = False
return primes
|
python
|
def get_primes(n):
"""Return list of all primes less than n,
Using sieve of Eratosthenes.
"""
if n <= 0:
raise ValueError("'n' must be a positive integer.")
# If x is even, exclude x from list (-1):
sieve_size = (n // 2 - 1) if n % 2 == 0 else (n // 2)
sieve = [True for _ in range(sieve_size)] # Sieve
primes = [] # List of Primes
if n >= 2:
primes.append(2) # 2 is prime by default
for i in range(sieve_size):
if sieve[i]:
value_at_i = i*2 + 3
primes.append(value_at_i)
for j in range(i, sieve_size, value_at_i):
sieve[j] = False
return primes
|
[
"def",
"get_primes",
"(",
"n",
")",
":",
"if",
"n",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"'n' must be a positive integer.\"",
")",
"# If x is even, exclude x from list (-1):",
"sieve_size",
"=",
"(",
"n",
"//",
"2",
"-",
"1",
")",
"if",
"n",
"%",
"2",
"==",
"0",
"else",
"(",
"n",
"//",
"2",
")",
"sieve",
"=",
"[",
"True",
"for",
"_",
"in",
"range",
"(",
"sieve_size",
")",
"]",
"# Sieve",
"primes",
"=",
"[",
"]",
"# List of Primes",
"if",
"n",
">=",
"2",
":",
"primes",
".",
"append",
"(",
"2",
")",
"# 2 is prime by default",
"for",
"i",
"in",
"range",
"(",
"sieve_size",
")",
":",
"if",
"sieve",
"[",
"i",
"]",
":",
"value_at_i",
"=",
"i",
"*",
"2",
"+",
"3",
"primes",
".",
"append",
"(",
"value_at_i",
")",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"sieve_size",
",",
"value_at_i",
")",
":",
"sieve",
"[",
"j",
"]",
"=",
"False",
"return",
"primes"
] |
Return list of all primes less than n,
Using sieve of Eratosthenes.
|
[
"Return",
"list",
"of",
"all",
"primes",
"less",
"than",
"n",
"Using",
"sieve",
"of",
"Eratosthenes",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/primes_sieve_of_eratosthenes.py#L28-L46
|
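A usage sketch (not part of the source record); only odd candidates are stored in the sieve, index i standing for the value 2*i + 3, which is why 2 is appended separately.

print(get_primes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]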
21,563
|
keon/algorithms
|
algorithms/backtrack/permute.py
|
permute
|
def permute(elements):
"""
    returns a list with the permutations.
"""
if len(elements) <= 1:
return [elements]
else:
tmp = []
for perm in permute(elements[1:]):
for i in range(len(elements)):
tmp.append(perm[:i] + elements[0:1] + perm[i:])
return tmp
|
python
|
def permute(elements):
"""
    returns a list with the permutations.
"""
if len(elements) <= 1:
return [elements]
else:
tmp = []
for perm in permute(elements[1:]):
for i in range(len(elements)):
tmp.append(perm[:i] + elements[0:1] + perm[i:])
return tmp
|
[
"def",
"permute",
"(",
"elements",
")",
":",
"if",
"len",
"(",
"elements",
")",
"<=",
"1",
":",
"return",
"[",
"elements",
"]",
"else",
":",
"tmp",
"=",
"[",
"]",
"for",
"perm",
"in",
"permute",
"(",
"elements",
"[",
"1",
":",
"]",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"elements",
")",
")",
":",
"tmp",
".",
"append",
"(",
"perm",
"[",
":",
"i",
"]",
"+",
"elements",
"[",
"0",
":",
"1",
"]",
"+",
"perm",
"[",
"i",
":",
"]",
")",
"return",
"tmp"
] |
returns a list with the permutations.
|
[
"returns",
"a",
"list",
"with",
"the",
"permutations",
"."
] |
4d6569464a62a75c1357acc97e2dd32ee2f9f4a3
|
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/backtrack/permute.py#L17-L28
|
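A usage sketch (not part of the source record): the recursion inserts the head element into every position of each permutation of the tail.

print(permute([1, 2, 3]))
# [[1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 3, 2], [3, 1, 2], [3, 2, 1]]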
21,564
|
dmlc/xgboost
|
python-package/xgboost/rabit.py
|
_init_rabit
|
def _init_rabit():
"""internal library initializer."""
if _LIB is not None:
_LIB.RabitGetRank.restype = ctypes.c_int
_LIB.RabitGetWorldSize.restype = ctypes.c_int
_LIB.RabitIsDistributed.restype = ctypes.c_int
_LIB.RabitVersionNumber.restype = ctypes.c_int
|
python
|
def _init_rabit():
"""internal library initializer."""
if _LIB is not None:
_LIB.RabitGetRank.restype = ctypes.c_int
_LIB.RabitGetWorldSize.restype = ctypes.c_int
_LIB.RabitIsDistributed.restype = ctypes.c_int
_LIB.RabitVersionNumber.restype = ctypes.c_int
|
[
"def",
"_init_rabit",
"(",
")",
":",
"if",
"_LIB",
"is",
"not",
"None",
":",
"_LIB",
".",
"RabitGetRank",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitGetWorldSize",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitIsDistributed",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitVersionNumber",
".",
"restype",
"=",
"ctypes",
".",
"c_int"
] |
internal library initializer.
|
[
"internal",
"library",
"initializer",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/rabit.py#L14-L20
|
21,565
|
dmlc/xgboost
|
python-package/xgboost/rabit.py
|
init
|
def init(args=None):
"""Initialize the rabit library with arguments"""
if args is None:
args = []
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(arr), arr)
|
python
|
def init(args=None):
"""Initialize the rabit library with arguments"""
if args is None:
args = []
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(arr), arr)
|
[
"def",
"init",
"(",
"args",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"[",
"]",
"arr",
"=",
"(",
"ctypes",
".",
"c_char_p",
"*",
"len",
"(",
"args",
")",
")",
"(",
")",
"arr",
"[",
":",
"]",
"=",
"args",
"_LIB",
".",
"RabitInit",
"(",
"len",
"(",
"arr",
")",
",",
"arr",
")"
] |
Initialize the rabit library with arguments
|
[
"Initialize",
"the",
"rabit",
"library",
"with",
"arguments"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/rabit.py#L23-L29
|
21,566
|
dmlc/xgboost
|
python-package/xgboost/rabit.py
|
get_processor_name
|
def get_processor_name():
"""Get the processor name.
Returns
-------
name : str
the name of processor(host)
"""
mxlen = 256
length = ctypes.c_ulong()
buf = ctypes.create_string_buffer(mxlen)
_LIB.RabitGetProcessorName(buf, ctypes.byref(length), mxlen)
return buf.value
|
python
|
def get_processor_name():
"""Get the processor name.
Returns
-------
name : str
the name of processor(host)
"""
mxlen = 256
length = ctypes.c_ulong()
buf = ctypes.create_string_buffer(mxlen)
_LIB.RabitGetProcessorName(buf, ctypes.byref(length), mxlen)
return buf.value
|
[
"def",
"get_processor_name",
"(",
")",
":",
"mxlen",
"=",
"256",
"length",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"buf",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"mxlen",
")",
"_LIB",
".",
"RabitGetProcessorName",
"(",
"buf",
",",
"ctypes",
".",
"byref",
"(",
"length",
")",
",",
"mxlen",
")",
"return",
"buf",
".",
"value"
] |
Get the processor name.
Returns
-------
name : str
the name of processor(host)
|
[
"Get",
"the",
"processor",
"name",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/rabit.py#L82-L94
|
21,567
|
dmlc/xgboost
|
python-package/xgboost/rabit.py
|
broadcast
|
def broadcast(data, root):
"""Broadcast object from one node to all other nodes.
Parameters
----------
data : any type that can be pickled
Input data, if current rank does not equal root, this can be None
root : int
Rank of the node to broadcast data from.
Returns
-------
    object : the same type as data
the result of broadcast.
"""
rank = get_rank()
length = ctypes.c_ulong()
if root == rank:
assert data is not None, 'need to pass in data when broadcasting'
s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
length.value = len(s)
# run first broadcast
_LIB.RabitBroadcast(ctypes.byref(length),
ctypes.sizeof(ctypes.c_ulong), root)
if root != rank:
dptr = (ctypes.c_char * length.value)()
# run second
_LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p),
length.value, root)
data = pickle.loads(dptr.raw)
del dptr
else:
_LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p),
length.value, root)
del s
return data
|
python
|
def broadcast(data, root):
"""Broadcast object from one node to all other nodes.
Parameters
----------
data : any type that can be pickled
Input data, if current rank does not equal root, this can be None
root : int
Rank of the node to broadcast data from.
Returns
-------
    object : the same type as data
the result of broadcast.
"""
rank = get_rank()
length = ctypes.c_ulong()
if root == rank:
assert data is not None, 'need to pass in data when broadcasting'
s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
length.value = len(s)
# run first broadcast
_LIB.RabitBroadcast(ctypes.byref(length),
ctypes.sizeof(ctypes.c_ulong), root)
if root != rank:
dptr = (ctypes.c_char * length.value)()
# run second
_LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p),
length.value, root)
data = pickle.loads(dptr.raw)
del dptr
else:
_LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p),
length.value, root)
del s
return data
|
[
"def",
"broadcast",
"(",
"data",
",",
"root",
")",
":",
"rank",
"=",
"get_rank",
"(",
")",
"length",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"if",
"root",
"==",
"rank",
":",
"assert",
"data",
"is",
"not",
"None",
",",
"'need to pass in data when broadcasting'",
"s",
"=",
"pickle",
".",
"dumps",
"(",
"data",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"length",
".",
"value",
"=",
"len",
"(",
"s",
")",
"# run first broadcast",
"_LIB",
".",
"RabitBroadcast",
"(",
"ctypes",
".",
"byref",
"(",
"length",
")",
",",
"ctypes",
".",
"sizeof",
"(",
"ctypes",
".",
"c_ulong",
")",
",",
"root",
")",
"if",
"root",
"!=",
"rank",
":",
"dptr",
"=",
"(",
"ctypes",
".",
"c_char",
"*",
"length",
".",
"value",
")",
"(",
")",
"# run second",
"_LIB",
".",
"RabitBroadcast",
"(",
"ctypes",
".",
"cast",
"(",
"dptr",
",",
"ctypes",
".",
"c_void_p",
")",
",",
"length",
".",
"value",
",",
"root",
")",
"data",
"=",
"pickle",
".",
"loads",
"(",
"dptr",
".",
"raw",
")",
"del",
"dptr",
"else",
":",
"_LIB",
".",
"RabitBroadcast",
"(",
"ctypes",
".",
"cast",
"(",
"ctypes",
".",
"c_char_p",
"(",
"s",
")",
",",
"ctypes",
".",
"c_void_p",
")",
",",
"length",
".",
"value",
",",
"root",
")",
"del",
"s",
"return",
"data"
] |
Broadcast object from one node to all other nodes.
Parameters
----------
data : any type that can be pickled
Input data, if current rank does not equal root, this can be None
root : int
Rank of the node to broadcast data from.
Returns
-------
object : the same type as data
the result of broadcast.
|
[
"Broadcast",
"object",
"from",
"one",
"node",
"to",
"all",
"other",
"nodes",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/rabit.py#L97-L132
|
21,568
|
dmlc/xgboost
|
jvm-packages/create_jni.py
|
normpath
|
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
|
python
|
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
|
[
"def",
"normpath",
"(",
"path",
")",
":",
"normalized",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"path",
".",
"split",
"(",
"\"/\"",
")",
")",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"\"/\"",
")",
"+",
"normalized",
"else",
":",
"return",
"normalized"
] |
Normalize UNIX path to a native path.
|
[
"Normalize",
"UNIX",
"path",
"to",
"a",
"native",
"path",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/jvm-packages/create_jni.py#L61-L67
|
21,569
|
dmlc/xgboost
|
python-package/xgboost/training.py
|
CVPack.update
|
def update(self, iteration, fobj):
        """Update the boosters for one iteration"""
self.bst.update(self.dtrain, iteration, fobj)
|
python
|
def update(self, iteration, fobj):
        """Update the boosters for one iteration"""
self.bst.update(self.dtrain, iteration, fobj)
|
[
"def",
"update",
"(",
"self",
",",
"iteration",
",",
"fobj",
")",
":",
"self",
".",
"bst",
".",
"update",
"(",
"self",
".",
"dtrain",
",",
"iteration",
",",
"fobj",
")"
] |
Update the boosters for one iteration
|
[
"Update",
"the",
"boosters",
"for",
"one",
"iteration"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/training.py#L228-L230
|
21,570
|
dmlc/xgboost
|
python-package/xgboost/training.py
|
CVPack.eval
|
def eval(self, iteration, feval):
        """Evaluate the CVPack for one iteration."""
return self.bst.eval_set(self.watchlist, iteration, feval)
|
python
|
def eval(self, iteration, feval):
        """Evaluate the CVPack for one iteration."""
return self.bst.eval_set(self.watchlist, iteration, feval)
|
[
"def",
"eval",
"(",
"self",
",",
"iteration",
",",
"feval",
")",
":",
"return",
"self",
".",
"bst",
".",
"eval_set",
"(",
"self",
".",
"watchlist",
",",
"iteration",
",",
"feval",
")"
] |
Evaluate the CVPack for one iteration.
|
[
"Evaluate",
"the",
"CVPack",
"for",
"one",
"iteration",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/training.py#L232-L234
|
21,571
|
dmlc/xgboost
|
python-package/xgboost/callback.py
|
_get_callback_context
|
def _get_callback_context(env):
"""return whether the current callback context is cv or train"""
if env.model is not None and env.cvfolds is None:
context = 'train'
elif env.model is None and env.cvfolds is not None:
context = 'cv'
return context
|
python
|
def _get_callback_context(env):
"""return whether the current callback context is cv or train"""
if env.model is not None and env.cvfolds is None:
context = 'train'
elif env.model is None and env.cvfolds is not None:
context = 'cv'
return context
|
[
"def",
"_get_callback_context",
"(",
"env",
")",
":",
"if",
"env",
".",
"model",
"is",
"not",
"None",
"and",
"env",
".",
"cvfolds",
"is",
"None",
":",
"context",
"=",
"'train'",
"elif",
"env",
".",
"model",
"is",
"None",
"and",
"env",
".",
"cvfolds",
"is",
"not",
"None",
":",
"context",
"=",
"'cv'",
"return",
"context"
] |
return whether the current callback context is cv or train
|
[
"return",
"whether",
"the",
"current",
"callback",
"context",
"is",
"cv",
"or",
"train"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/callback.py#L10-L16
|
21,572
|
dmlc/xgboost
|
python-package/xgboost/callback.py
|
_fmt_metric
|
def _fmt_metric(value, show_stdv=True):
"""format metric string"""
if len(value) == 2:
return '%s:%g' % (value[0], value[1])
if len(value) == 3:
if show_stdv:
return '%s:%g+%g' % (value[0], value[1], value[2])
return '%s:%g' % (value[0], value[1])
raise ValueError("wrong metric value")
|
python
|
def _fmt_metric(value, show_stdv=True):
"""format metric string"""
if len(value) == 2:
return '%s:%g' % (value[0], value[1])
if len(value) == 3:
if show_stdv:
return '%s:%g+%g' % (value[0], value[1], value[2])
return '%s:%g' % (value[0], value[1])
raise ValueError("wrong metric value")
|
[
"def",
"_fmt_metric",
"(",
"value",
",",
"show_stdv",
"=",
"True",
")",
":",
"if",
"len",
"(",
"value",
")",
"==",
"2",
":",
"return",
"'%s:%g'",
"%",
"(",
"value",
"[",
"0",
"]",
",",
"value",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"value",
")",
"==",
"3",
":",
"if",
"show_stdv",
":",
"return",
"'%s:%g+%g'",
"%",
"(",
"value",
"[",
"0",
"]",
",",
"value",
"[",
"1",
"]",
",",
"value",
"[",
"2",
"]",
")",
"return",
"'%s:%g'",
"%",
"(",
"value",
"[",
"0",
"]",
",",
"value",
"[",
"1",
"]",
")",
"raise",
"ValueError",
"(",
"\"wrong metric value\"",
")"
] |
format metric string
|
[
"format",
"metric",
"string"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/callback.py#L19-L27
|
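A quick sketch (not part of the source record) of the two accepted tuple shapes; 3-tuples carry a cross-validation standard deviation as the last element.

print(_fmt_metric(('auc', 0.975)))                         # auc:0.975
print(_fmt_metric(('auc', 0.975, 0.01)))                   # auc:0.975+0.01
print(_fmt_metric(('auc', 0.975, 0.01), show_stdv=False))  # auc:0.975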
21,573
|
dmlc/xgboost
|
python-package/xgboost/callback.py
|
print_evaluation
|
def print_evaluation(period=1, show_stdv=True):
    """Create a callback that prints the evaluation result.
We print the evaluation results every **period** iterations
and on the first and the last iterations.
Parameters
----------
period : int
The period to log the evaluation results
show_stdv : bool, optional
Whether show stdv if provided
Returns
-------
callback : function
        A callback that prints evaluation every period iterations.
"""
def callback(env):
"""internal function"""
if env.rank != 0 or (not env.evaluation_result_list) or period is False or period == 0:
return
i = env.iteration
if i % period == 0 or i + 1 == env.begin_iteration or i + 1 == env.end_iteration:
msg = '\t'.join([_fmt_metric(x, show_stdv) for x in env.evaluation_result_list])
rabit.tracker_print('[%d]\t%s\n' % (i, msg))
return callback
|
python
|
def print_evaluation(period=1, show_stdv=True):
    """Create a callback that prints the evaluation result.
We print the evaluation results every **period** iterations
and on the first and the last iterations.
Parameters
----------
period : int
The period to log the evaluation results
show_stdv : bool, optional
Whether show stdv if provided
Returns
-------
callback : function
        A callback that prints evaluation every period iterations.
"""
def callback(env):
"""internal function"""
if env.rank != 0 or (not env.evaluation_result_list) or period is False or period == 0:
return
i = env.iteration
if i % period == 0 or i + 1 == env.begin_iteration or i + 1 == env.end_iteration:
msg = '\t'.join([_fmt_metric(x, show_stdv) for x in env.evaluation_result_list])
rabit.tracker_print('[%d]\t%s\n' % (i, msg))
return callback
|
[
"def",
"print_evaluation",
"(",
"period",
"=",
"1",
",",
"show_stdv",
"=",
"True",
")",
":",
"def",
"callback",
"(",
"env",
")",
":",
"\"\"\"internal function\"\"\"",
"if",
"env",
".",
"rank",
"!=",
"0",
"or",
"(",
"not",
"env",
".",
"evaluation_result_list",
")",
"or",
"period",
"is",
"False",
"or",
"period",
"==",
"0",
":",
"return",
"i",
"=",
"env",
".",
"iteration",
"if",
"i",
"%",
"period",
"==",
"0",
"or",
"i",
"+",
"1",
"==",
"env",
".",
"begin_iteration",
"or",
"i",
"+",
"1",
"==",
"env",
".",
"end_iteration",
":",
"msg",
"=",
"'\\t'",
".",
"join",
"(",
"[",
"_fmt_metric",
"(",
"x",
",",
"show_stdv",
")",
"for",
"x",
"in",
"env",
".",
"evaluation_result_list",
"]",
")",
"rabit",
".",
"tracker_print",
"(",
"'[%d]\\t%s\\n'",
"%",
"(",
"i",
",",
"msg",
")",
")",
"return",
"callback"
] |
Create a callback that prints the evaluation result.
We print the evaluation results every **period** iterations
and on the first and the last iterations.
Parameters
----------
period : int
The period to log the evaluation results
show_stdv : bool, optional
Whether show stdv if provided
Returns
-------
callback : function
    A callback that prints evaluation every period iterations.
|
[
"Create",
"a",
"callback",
"that",
"prints",
"the",
"evaluation",
"result",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/callback.py#L30-L57
|
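A usage sketch (not part of the source record), under the assumption that the callback is wired in through xgboost's pre-1.3 callbacks argument; dtrain here is a placeholder DMatrix the caller must build from real data.

import xgboost as xgb

# dtrain = xgb.DMatrix(...) built from the caller's data (placeholder)
bst = xgb.train({'objective': 'binary:logistic'}, dtrain,
                num_boost_round=100,
                evals=[(dtrain, 'train')],
                callbacks=[print_evaluation(period=10)])  # log every 10th round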
21,574
|
dmlc/xgboost
|
python-package/xgboost/callback.py
|
reset_learning_rate
|
def reset_learning_rate(learning_rates):
"""Reset learning rate after iteration 1
    NOTE: the initial learning rate will still take effect on the first iteration.
Parameters
----------
learning_rates: list or function
List of learning rate for each boosting round
or a customized function that calculates eta in terms of
current number of round and the total number of boosting round (e.g.
yields learning rate decay)
* list ``l``: ``eta = l[boosting_round]``
* function ``f``: ``eta = f(boosting_round, num_boost_round)``
Returns
-------
callback : function
The requested callback function.
"""
def get_learning_rate(i, n, learning_rates):
"""helper providing the learning rate"""
if isinstance(learning_rates, list):
if len(learning_rates) != n:
raise ValueError("Length of list 'learning_rates' has to equal 'num_boost_round'.")
new_learning_rate = learning_rates[i]
else:
new_learning_rate = learning_rates(i, n)
return new_learning_rate
def callback(env):
"""internal function"""
context = _get_callback_context(env)
if context == 'train':
bst, i, n = env.model, env.iteration, env.end_iteration
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
elif context == 'cv':
i, n = env.iteration, env.end_iteration
for cvpack in env.cvfolds:
bst = cvpack.bst
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
callback.before_iteration = True
return callback
|
python
|
def reset_learning_rate(learning_rates):
"""Reset learning rate after iteration 1
    NOTE: the initial learning rate will still take effect on the first iteration.
Parameters
----------
learning_rates: list or function
List of learning rate for each boosting round
or a customized function that calculates eta in terms of
current number of round and the total number of boosting round (e.g.
yields learning rate decay)
* list ``l``: ``eta = l[boosting_round]``
* function ``f``: ``eta = f(boosting_round, num_boost_round)``
Returns
-------
callback : function
The requested callback function.
"""
def get_learning_rate(i, n, learning_rates):
"""helper providing the learning rate"""
if isinstance(learning_rates, list):
if len(learning_rates) != n:
raise ValueError("Length of list 'learning_rates' has to equal 'num_boost_round'.")
new_learning_rate = learning_rates[i]
else:
new_learning_rate = learning_rates(i, n)
return new_learning_rate
def callback(env):
"""internal function"""
context = _get_callback_context(env)
if context == 'train':
bst, i, n = env.model, env.iteration, env.end_iteration
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
elif context == 'cv':
i, n = env.iteration, env.end_iteration
for cvpack in env.cvfolds:
bst = cvpack.bst
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
callback.before_iteration = True
return callback
|
[
"def",
"reset_learning_rate",
"(",
"learning_rates",
")",
":",
"def",
"get_learning_rate",
"(",
"i",
",",
"n",
",",
"learning_rates",
")",
":",
"\"\"\"helper providing the learning rate\"\"\"",
"if",
"isinstance",
"(",
"learning_rates",
",",
"list",
")",
":",
"if",
"len",
"(",
"learning_rates",
")",
"!=",
"n",
":",
"raise",
"ValueError",
"(",
"\"Length of list 'learning_rates' has to equal 'num_boost_round'.\"",
")",
"new_learning_rate",
"=",
"learning_rates",
"[",
"i",
"]",
"else",
":",
"new_learning_rate",
"=",
"learning_rates",
"(",
"i",
",",
"n",
")",
"return",
"new_learning_rate",
"def",
"callback",
"(",
"env",
")",
":",
"\"\"\"internal function\"\"\"",
"context",
"=",
"_get_callback_context",
"(",
"env",
")",
"if",
"context",
"==",
"'train'",
":",
"bst",
",",
"i",
",",
"n",
"=",
"env",
".",
"model",
",",
"env",
".",
"iteration",
",",
"env",
".",
"end_iteration",
"bst",
".",
"set_param",
"(",
"'learning_rate'",
",",
"get_learning_rate",
"(",
"i",
",",
"n",
",",
"learning_rates",
")",
")",
"elif",
"context",
"==",
"'cv'",
":",
"i",
",",
"n",
"=",
"env",
".",
"iteration",
",",
"env",
".",
"end_iteration",
"for",
"cvpack",
"in",
"env",
".",
"cvfolds",
":",
"bst",
"=",
"cvpack",
".",
"bst",
"bst",
".",
"set_param",
"(",
"'learning_rate'",
",",
"get_learning_rate",
"(",
"i",
",",
"n",
",",
"learning_rates",
")",
")",
"callback",
".",
"before_iteration",
"=",
"True",
"return",
"callback"
] |
Reset learning rate after iteration 1
NOTE: the initial learning rate will still take effect on the first iteration.
Parameters
----------
learning_rates: list or function
List of learning rate for each boosting round
or a customized function that calculates eta in terms of
current number of round and the total number of boosting round (e.g.
yields learning rate decay)
* list ``l``: ``eta = l[boosting_round]``
* function ``f``: ``eta = f(boosting_round, num_boost_round)``
Returns
-------
callback : function
The requested callback function.
|
[
"Reset",
"learning",
"rate",
"after",
"iteration",
"1"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/callback.py#L100-L145
|
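A decay sketch (not part of the source record, under the same assumed pre-1.3 callbacks API as above; params and dtrain are placeholders): the callable form receives the current round and the total number of rounds.

eta_decay = reset_learning_rate(lambda i, n: 0.3 * (0.99 ** i))  # geometric per-round decay
bst = xgb.train(params, dtrain, num_boost_round=100, callbacks=[eta_decay])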
21,575
|
dmlc/xgboost
|
python-package/xgboost/sklearn.py
|
_objective_decorator
|
def _objective_decorator(func):
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds, dmatrix):
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner
|
python
|
def _objective_decorator(func):
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds, dmatrix):
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner
|
[
"def",
"_objective_decorator",
"(",
"func",
")",
":",
"def",
"inner",
"(",
"preds",
",",
"dmatrix",
")",
":",
"\"\"\"internal function\"\"\"",
"labels",
"=",
"dmatrix",
".",
"get_label",
"(",
")",
"return",
"func",
"(",
"labels",
",",
"preds",
")",
"return",
"inner"
] |
Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
|
[
"Decorate",
"an",
"objective",
"function"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L18-L50
|
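A sketch of what the decorator buys you, assuming the usual XGBoost convention that a custom objective returns a (grad, hess) pair; `squared_error` is a toy stand-in, and `_objective_decorator` is private to xgboost/sklearn.py's own namespace:

import numpy as np

def squared_error(y_true, y_pred):
    # sklearn-style signature: labels first, predictions second.
    grad = y_pred - y_true          # first-order gradient
    hess = np.ones_like(y_pred)     # second-order gradient
    return grad, hess

# The decorator only rewires the argument order; after wrapping,
# wrapped(preds, dmatrix) calls squared_error(dmatrix.get_label(), preds).
wrapped = _objective_decorator(squared_error)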
21,576
|
dmlc/xgboost
|
python-package/xgboost/sklearn.py
|
XGBModel.apply
|
def apply(self, X, ntree_limit=0):
"""Return the predicted leaf every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
"""
test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)
return self.get_booster().predict(test_dmatrix,
pred_leaf=True,
ntree_limit=ntree_limit)
|
python
|
def apply(self, X, ntree_limit=0):
"""Return the predicted leaf every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
"""
test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)
return self.get_booster().predict(test_dmatrix,
pred_leaf=True,
ntree_limit=ntree_limit)
|
[
"def",
"apply",
"(",
"self",
",",
"X",
",",
"ntree_limit",
"=",
"0",
")",
":",
"test_dmatrix",
"=",
"DMatrix",
"(",
"X",
",",
"missing",
"=",
"self",
".",
"missing",
",",
"nthread",
"=",
"self",
".",
"n_jobs",
")",
"return",
"self",
".",
"get_booster",
"(",
")",
".",
"predict",
"(",
"test_dmatrix",
",",
"pred_leaf",
"=",
"True",
",",
"ntree_limit",
"=",
"ntree_limit",
")"
] |
Return the predicted leaf of every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
|
[
"Return",
"the",
"predicted",
"leaf",
"every",
"tree",
"for",
"each",
"sample",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L458-L479
|
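A short self-contained sketch of the method above (synthetic data; the parameter values are arbitrary):

import numpy as np
import xgboost as xgb

model = xgb.XGBRegressor(n_estimators=3, max_depth=2)
model.fit(np.random.rand(50, 4), np.random.rand(50))

leaves = model.apply(np.random.rand(5, 4))
print(leaves.shape)   # (5, 3): one leaf index per sample and per tree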
21,577
|
dmlc/xgboost
|
python-package/xgboost/sklearn.py
|
XGBModel.feature_importances_
|
def feature_importances_(self):
"""
Feature importances property
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Returns
-------
feature_importances_ : array of shape ``[n_features]``
"""
if getattr(self, 'booster', None) is not None and self.booster != 'gbtree':
raise AttributeError('Feature importance is not defined for Booster type {}'
.format(self.booster))
b = self.get_booster()
score = b.get_score(importance_type=self.importance_type)
all_features = [score.get(f, 0.) for f in b.feature_names]
all_features = np.array(all_features, dtype=np.float32)
return all_features / all_features.sum()
|
python
|
def feature_importances_(self):
"""
Feature importances property
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Returns
-------
feature_importances_ : array of shape ``[n_features]``
"""
if getattr(self, 'booster', None) is not None and self.booster != 'gbtree':
raise AttributeError('Feature importance is not defined for Booster type {}'
.format(self.booster))
b = self.get_booster()
score = b.get_score(importance_type=self.importance_type)
all_features = [score.get(f, 0.) for f in b.feature_names]
all_features = np.array(all_features, dtype=np.float32)
return all_features / all_features.sum()
|
[
"def",
"feature_importances_",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'booster'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"self",
".",
"booster",
"!=",
"'gbtree'",
":",
"raise",
"AttributeError",
"(",
"'Feature importance is not defined for Booster type {}'",
".",
"format",
"(",
"self",
".",
"booster",
")",
")",
"b",
"=",
"self",
".",
"get_booster",
"(",
")",
"score",
"=",
"b",
".",
"get_score",
"(",
"importance_type",
"=",
"self",
".",
"importance_type",
")",
"all_features",
"=",
"[",
"score",
".",
"get",
"(",
"f",
",",
"0.",
")",
"for",
"f",
"in",
"b",
".",
"feature_names",
"]",
"all_features",
"=",
"np",
".",
"array",
"(",
"all_features",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"return",
"all_features",
"/",
"all_features",
".",
"sum",
"(",
")"
] |
Feature importances property
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Returns
-------
feature_importances_ : array of shape ``[n_features]``
|
[
"Feature",
"importances",
"property"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L524-L546
|
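Usage sketch for the property above; because the scores are divided by their sum in the final line, they add up to (approximately) 1:

import numpy as np
import xgboost as xgb

clf = xgb.XGBClassifier(n_estimators=10)
clf.fit(np.random.rand(80, 6), np.random.randint(0, 2, 80))

imp = clf.feature_importances_
print(imp.shape, imp.sum())   # (6,) ~1.0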
21,578
|
dmlc/xgboost
|
python-package/xgboost/sklearn.py
|
XGBClassifier.predict_proba
|
def predict_proba(self, data, ntree_limit=None, validate_features=True):
"""
Predict the probability of each `data` example being of a given class.
.. note:: This function is not thread safe
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple threads, call ``xgb.copy()`` to make copies
of the model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
ntree_limit : int
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
a numpy array with the probability of each data example being of a given class.
"""
test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
if ntree_limit is None:
ntree_limit = getattr(self, "best_ntree_limit", 0)
class_probs = self.get_booster().predict(test_dmatrix,
ntree_limit=ntree_limit,
validate_features=validate_features)
if self.objective == "multi:softprob":
return class_probs
classone_probs = class_probs
classzero_probs = 1.0 - classone_probs
return np.vstack((classzero_probs, classone_probs)).transpose()
|
python
|
def predict_proba(self, data, ntree_limit=None, validate_features=True):
"""
Predict the probability of each `data` example being of a given class.
.. note:: This function is not thread safe
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple threads, call ``xgb.copy()`` to make copies
of the model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
ntree_limit : int
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
a numpy array with the probability of each data example being of a given class.
"""
test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
if ntree_limit is None:
ntree_limit = getattr(self, "best_ntree_limit", 0)
class_probs = self.get_booster().predict(test_dmatrix,
ntree_limit=ntree_limit,
validate_features=validate_features)
if self.objective == "multi:softprob":
return class_probs
classone_probs = class_probs
classzero_probs = 1.0 - classone_probs
return np.vstack((classzero_probs, classone_probs)).transpose()
|
[
"def",
"predict_proba",
"(",
"self",
",",
"data",
",",
"ntree_limit",
"=",
"None",
",",
"validate_features",
"=",
"True",
")",
":",
"test_dmatrix",
"=",
"DMatrix",
"(",
"data",
",",
"missing",
"=",
"self",
".",
"missing",
",",
"nthread",
"=",
"self",
".",
"n_jobs",
")",
"if",
"ntree_limit",
"is",
"None",
":",
"ntree_limit",
"=",
"getattr",
"(",
"self",
",",
"\"best_ntree_limit\"",
",",
"0",
")",
"class_probs",
"=",
"self",
".",
"get_booster",
"(",
")",
".",
"predict",
"(",
"test_dmatrix",
",",
"ntree_limit",
"=",
"ntree_limit",
",",
"validate_features",
"=",
"validate_features",
")",
"if",
"self",
".",
"objective",
"==",
"\"multi:softprob\"",
":",
"return",
"class_probs",
"classone_probs",
"=",
"class_probs",
"classzero_probs",
"=",
"1.0",
"-",
"classone_probs",
"return",
"np",
".",
"vstack",
"(",
"(",
"classzero_probs",
",",
"classone_probs",
")",
")",
".",
"transpose",
"(",
")"
] |
Predict the probability of each `data` example being of a given class.
.. note:: This function is not thread safe
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple threads, call ``xgb.copy()`` to make copies
of the model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
ntree_limit : int
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
a numpy array with the probability of each data example being of a given class.
|
[
"Predict",
"the",
"probability",
"of",
"each",
"data",
"example",
"being",
"of",
"a",
"given",
"class",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L803-L839
|
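Sketch of the binary case, where the final np.vstack(...).transpose() in the method above produces one column per class (synthetic data for illustration):

import numpy as np
import xgboost as xgb

clf = xgb.XGBClassifier(n_estimators=5)
clf.fit(np.random.rand(80, 6), np.random.randint(0, 2, 80))

proba = clf.predict_proba(np.random.rand(5, 6))
print(proba.shape)   # (5, 2): columns are P(class 0) and P(class 1)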
21,579
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
from_pystr_to_cstr
|
def from_pystr_to_cstr(data):
"""Convert a list of Python str to C pointer
Parameters
----------
data : list
list of str
"""
if not isinstance(data, list):
raise NotImplementedError
pointers = (ctypes.c_char_p * len(data))()
if PY3:
data = [bytes(d, 'utf-8') for d in data]
else:
data = [d.encode('utf-8') if isinstance(d, unicode) else d # pylint: disable=undefined-variable
for d in data]
pointers[:] = data
return pointers
|
python
|
def from_pystr_to_cstr(data):
"""Convert a list of Python str to C pointer
Parameters
----------
data : list
list of str
"""
if not isinstance(data, list):
raise NotImplementedError
pointers = (ctypes.c_char_p * len(data))()
if PY3:
data = [bytes(d, 'utf-8') for d in data]
else:
data = [d.encode('utf-8') if isinstance(d, unicode) else d # pylint: disable=undefined-variable
for d in data]
pointers[:] = data
return pointers
|
[
"def",
"from_pystr_to_cstr",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"raise",
"NotImplementedError",
"pointers",
"=",
"(",
"ctypes",
".",
"c_char_p",
"*",
"len",
"(",
"data",
")",
")",
"(",
")",
"if",
"PY3",
":",
"data",
"=",
"[",
"bytes",
"(",
"d",
",",
"'utf-8'",
")",
"for",
"d",
"in",
"data",
"]",
"else",
":",
"data",
"=",
"[",
"d",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"d",
",",
"unicode",
")",
"else",
"d",
"# pylint: disable=undefined-variable",
"for",
"d",
"in",
"data",
"]",
"pointers",
"[",
":",
"]",
"=",
"data",
"return",
"pointers"
] |
Convert a list of Python str to C pointer
Parameters
----------
data : list
list of str
|
[
"Convert",
"a",
"list",
"of",
"Python",
"str",
"to",
"C",
"pointer"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L60-L78
|
21,580
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
from_cstr_to_pystr
|
def from_cstr_to_pystr(data, length):
"""Revert C pointer to Python str
Parameters
----------
data : ctypes pointer
pointer to data
length : ctypes pointer
pointer to length of data
"""
if PY3:
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
res.append(str(data[i].decode('utf-8')))
else:
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
# pylint: disable=undefined-variable
res.append(unicode(data[i].decode('utf-8')))
return res
|
python
|
def from_cstr_to_pystr(data, length):
"""Revert C pointer to Python str
Parameters
----------
data : ctypes pointer
pointer to data
length : ctypes pointer
pointer to length of data
"""
if PY3:
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
res.append(str(data[i].decode('utf-8')))
else:
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
# pylint: disable=undefined-variable
res.append(unicode(data[i].decode('utf-8')))
return res
|
[
"def",
"from_cstr_to_pystr",
"(",
"data",
",",
"length",
")",
":",
"if",
"PY3",
":",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"length",
".",
"value",
")",
":",
"try",
":",
"res",
".",
"append",
"(",
"str",
"(",
"data",
"[",
"i",
"]",
".",
"decode",
"(",
"'ascii'",
")",
")",
")",
"except",
"UnicodeDecodeError",
":",
"res",
".",
"append",
"(",
"str",
"(",
"data",
"[",
"i",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"else",
":",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"length",
".",
"value",
")",
":",
"try",
":",
"res",
".",
"append",
"(",
"str",
"(",
"data",
"[",
"i",
"]",
".",
"decode",
"(",
"'ascii'",
")",
")",
")",
"except",
"UnicodeDecodeError",
":",
"# pylint: disable=undefined-variable",
"res",
".",
"append",
"(",
"unicode",
"(",
"data",
"[",
"i",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"return",
"res"
] |
Revert C pointer to Python str
Parameters
----------
data : ctypes pointer
pointer to data
length : ctypes pointer
pointer to length of data
|
[
"Revert",
"C",
"pointer",
"to",
"Python",
"str"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L81-L106
|
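A roundtrip sketch through the two string helpers above (both live in xgboost/core.py's namespace); ctypes.c_uint64 stands in for the library's c_bst_ulong alias, which is an assumption about its definition:

import ctypes

ptrs = from_pystr_to_cstr(['alpha', 'beta'])   # Python list -> c_char_p array
length = ctypes.c_uint64(2)                    # stand-in for c_bst_ulong
print(from_cstr_to_pystr(ptrs, length))        # ['alpha', 'beta']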
21,581
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
_load_lib
|
def _load_lib():
"""Load xgboost Library."""
lib_paths = find_lib_path()
if not lib_paths:
return None
try:
pathBackup = os.environ['PATH'].split(os.pathsep)
except KeyError:
pathBackup = []
lib_success = False
os_error_list = []
for lib_path in lib_paths:
try:
# needed when the lib is linked with non-system-available dependencies
os.environ['PATH'] = os.pathsep.join(pathBackup + [os.path.dirname(lib_path)])
lib = ctypes.cdll.LoadLibrary(lib_path)
lib_success = True
except OSError as e:
os_error_list.append(str(e))
continue
finally:
os.environ['PATH'] = os.pathsep.join(pathBackup)
if not lib_success:
libname = os.path.basename(lib_paths[0])
raise XGBoostError(
'XGBoost Library ({}) could not be loaded.\n'.format(libname) +
'Likely causes:\n' +
' * OpenMP runtime is not installed ' +
'(vcomp140.dll or libgomp-1.dll for Windows, ' +
'libgomp.so for UNIX-like OSes)\n' +
' * You are running 32-bit Python on a 64-bit OS\n' +
'Error message(s): {}\n'.format(os_error_list))
lib.XGBGetLastError.restype = ctypes.c_char_p
lib.callback = _get_log_callback_func()
if lib.XGBRegisterLogCallback(lib.callback) != 0:
raise XGBoostError(lib.XGBGetLastError())
return lib
|
python
|
def _load_lib():
"""Load xgboost Library."""
lib_paths = find_lib_path()
if not lib_paths:
return None
try:
pathBackup = os.environ['PATH'].split(os.pathsep)
except KeyError:
pathBackup = []
lib_success = False
os_error_list = []
for lib_path in lib_paths:
try:
# needed when the lib is linked with non-system-available dependencies
os.environ['PATH'] = os.pathsep.join(pathBackup + [os.path.dirname(lib_path)])
lib = ctypes.cdll.LoadLibrary(lib_path)
lib_success = True
except OSError as e:
os_error_list.append(str(e))
continue
finally:
os.environ['PATH'] = os.pathsep.join(pathBackup)
if not lib_success:
libname = os.path.basename(lib_paths[0])
raise XGBoostError(
'XGBoost Library ({}) could not be loaded.\n'.format(libname) +
'Likely causes:\n' +
' * OpenMP runtime is not installed ' +
'(vcomp140.dll or libgomp-1.dll for Windows, ' +
'libgomp.so for UNIX-like OSes)\n' +
' * You are running 32-bit Python on a 64-bit OS\n' +
'Error message(s): {}\n'.format(os_error_list))
lib.XGBGetLastError.restype = ctypes.c_char_p
lib.callback = _get_log_callback_func()
if lib.XGBRegisterLogCallback(lib.callback) != 0:
raise XGBoostError(lib.XGBGetLastError())
return lib
|
[
"def",
"_load_lib",
"(",
")",
":",
"lib_paths",
"=",
"find_lib_path",
"(",
")",
"if",
"not",
"lib_paths",
":",
"return",
"None",
"try",
":",
"pathBackup",
"=",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"except",
"KeyError",
":",
"pathBackup",
"=",
"[",
"]",
"lib_success",
"=",
"False",
"os_error_list",
"=",
"[",
"]",
"for",
"lib_path",
"in",
"lib_paths",
":",
"try",
":",
"# needed when the lib is linked with non-system-available dependencies",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"pathBackup",
"+",
"[",
"os",
".",
"path",
".",
"dirname",
"(",
"lib_path",
")",
"]",
")",
"lib",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"lib_path",
")",
"lib_success",
"=",
"True",
"except",
"OSError",
"as",
"e",
":",
"os_error_list",
".",
"append",
"(",
"str",
"(",
"e",
")",
")",
"continue",
"finally",
":",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"pathBackup",
")",
"if",
"not",
"lib_success",
":",
"libname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"lib_paths",
"[",
"0",
"]",
")",
"raise",
"XGBoostError",
"(",
"'XGBoost Library ({}) could not be loaded.\\n'",
".",
"format",
"(",
"libname",
")",
"+",
"'Likely causes:\\n'",
"+",
"' * OpenMP runtime is not installed '",
"+",
"'(vcomp140.dll or libgomp-1.dll for Windows, '",
"+",
"'libgomp.so for UNIX-like OSes)\\n'",
"+",
"' * You are running 32-bit Python on a 64-bit OS\\n'",
"+",
"'Error message(s): {}\\n'",
".",
"format",
"(",
"os_error_list",
")",
")",
"lib",
".",
"XGBGetLastError",
".",
"restype",
"=",
"ctypes",
".",
"c_char_p",
"lib",
".",
"callback",
"=",
"_get_log_callback_func",
"(",
")",
"if",
"lib",
".",
"XGBRegisterLogCallback",
"(",
"lib",
".",
"callback",
")",
"!=",
"0",
":",
"raise",
"XGBoostError",
"(",
"lib",
".",
"XGBGetLastError",
"(",
")",
")",
"return",
"lib"
] |
Load xgboost Library.
|
[
"Load",
"xgboost",
"Library",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L121-L157
|
21,582
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
ctypes2buffer
|
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
|
python
|
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
|
[
"def",
"ctypes2buffer",
"(",
"cptr",
",",
"length",
")",
":",
"if",
"not",
"isinstance",
"(",
"cptr",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'expected char pointer'",
")",
"res",
"=",
"bytearray",
"(",
"length",
")",
"rptr",
"=",
"(",
"ctypes",
".",
"c_char",
"*",
"length",
")",
".",
"from_buffer",
"(",
"res",
")",
"if",
"not",
"ctypes",
".",
"memmove",
"(",
"rptr",
",",
"cptr",
",",
"length",
")",
":",
"raise",
"RuntimeError",
"(",
"'memmove failed'",
")",
"return",
"res"
] |
Convert ctypes pointer to buffer type.
|
[
"Convert",
"ctypes",
"pointer",
"to",
"buffer",
"type",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L197-L205
|
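Sketch of the helper above: it insists on a POINTER(c_char), so a cast is needed when starting from an ordinary ctypes string buffer:

import ctypes

raw = ctypes.create_string_buffer(b'hello')            # NUL-terminated C buffer
cptr = ctypes.cast(raw, ctypes.POINTER(ctypes.c_char))
print(bytes(ctypes2buffer(cptr, 5)))                   # b'hello'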
21,583
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
c_array
|
def c_array(ctype, values):
"""Convert a python string to c array."""
if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):
return (ctype * len(values)).from_buffer_copy(values)
return (ctype * len(values))(*values)
|
python
|
def c_array(ctype, values):
"""Convert a python string to c array."""
if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):
return (ctype * len(values)).from_buffer_copy(values)
return (ctype * len(values))(*values)
|
[
"def",
"c_array",
"(",
"ctype",
",",
"values",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"np",
".",
"ndarray",
")",
"and",
"values",
".",
"dtype",
".",
"itemsize",
"==",
"ctypes",
".",
"sizeof",
"(",
"ctype",
")",
":",
"return",
"(",
"ctype",
"*",
"len",
"(",
"values",
")",
")",
".",
"from_buffer_copy",
"(",
"values",
")",
"return",
"(",
"ctype",
"*",
"len",
"(",
"values",
")",
")",
"(",
"*",
"values",
")"
] |
Convert a python sequence to c array.
|
[
"Convert",
"a",
"python",
"string",
"to",
"c",
"array",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L213-L217
|
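Sketch of the two code paths in c_array: a numpy array whose itemsize matches the target ctype takes the from_buffer_copy fast path, while any other sequence is converted element by element:

import ctypes
import numpy as np

fast = c_array(ctypes.c_float, np.arange(3, dtype=np.float32))  # buffer-copy path
slow = c_array(ctypes.c_int, [1, 2, 3])                         # element-wise path
print(fast[1], slow[2])                                         # 1.0 3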
21,584
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
_maybe_dt_array
|
def _maybe_dt_array(array):
""" Extract numpy array from single column data table """
if not isinstance(array, DataTable) or array is None:
return array
if array.shape[1] > 1:
raise ValueError('DataTable for label or weight cannot have multiple columns')
# below requires new dt version
# extract first column
array = array.to_numpy()[:, 0].astype('float')
return array
|
python
|
def _maybe_dt_array(array):
""" Extract numpy array from single column data table """
if not isinstance(array, DataTable) or array is None:
return array
if array.shape[1] > 1:
raise ValueError('DataTable for label or weight cannot have multiple columns')
# below requires new dt version
# extract first column
array = array.to_numpy()[:, 0].astype('float')
return array
|
[
"def",
"_maybe_dt_array",
"(",
"array",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"DataTable",
")",
"or",
"array",
"is",
"None",
":",
"return",
"array",
"if",
"array",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'DataTable for label or weight cannot have multiple columns'",
")",
"# below requires new dt version",
"# extract first column",
"array",
"=",
"array",
".",
"to_numpy",
"(",
")",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"'float'",
")",
"return",
"array"
] |
Extract numpy array from single column data table
|
[
"Extract",
"numpy",
"array",
"from",
"single",
"column",
"data",
"table"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L306-L318
|
21,585
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
DMatrix._init_from_dt
|
def _init_from_dt(self, data, nthread):
"""
Initialize data from a datatable Frame.
"""
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, "internal") and hasattr(data.internal, "column"):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import frame_column_data_r # pylint: disable=no-name-in-module,import-error
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromDT(
ptrs, feature_type_strings,
c_bst_ulong(data.shape[0]),
c_bst_ulong(data.shape[1]),
ctypes.byref(handle),
nthread))
self.handle = handle
|
python
|
def _init_from_dt(self, data, nthread):
"""
Initialize data from a datatable Frame.
"""
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, "internal") and hasattr(data.internal, "column"):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import frame_column_data_r # pylint: disable=no-name-in-module,import-error
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromDT(
ptrs, feature_type_strings,
c_bst_ulong(data.shape[0]),
c_bst_ulong(data.shape[1]),
ctypes.byref(handle),
nthread))
self.handle = handle
|
[
"def",
"_init_from_dt",
"(",
"self",
",",
"data",
",",
"nthread",
")",
":",
"ptrs",
"=",
"(",
"ctypes",
".",
"c_void_p",
"*",
"data",
".",
"ncols",
")",
"(",
")",
"if",
"hasattr",
"(",
"data",
",",
"\"internal\"",
")",
"and",
"hasattr",
"(",
"data",
".",
"internal",
",",
"\"column\"",
")",
":",
"# datatable>0.8.0",
"for",
"icol",
"in",
"range",
"(",
"data",
".",
"ncols",
")",
":",
"col",
"=",
"data",
".",
"internal",
".",
"column",
"(",
"icol",
")",
"ptr",
"=",
"col",
".",
"data_pointer",
"ptrs",
"[",
"icol",
"]",
"=",
"ctypes",
".",
"c_void_p",
"(",
"ptr",
")",
"else",
":",
"# datatable<=0.8.0",
"from",
"datatable",
".",
"internal",
"import",
"frame_column_data_r",
"# pylint: disable=no-name-in-module,import-error",
"for",
"icol",
"in",
"range",
"(",
"data",
".",
"ncols",
")",
":",
"ptrs",
"[",
"icol",
"]",
"=",
"frame_column_data_r",
"(",
"data",
",",
"icol",
")",
"# always return stypes for dt ingestion",
"feature_type_strings",
"=",
"(",
"ctypes",
".",
"c_char_p",
"*",
"data",
".",
"ncols",
")",
"(",
")",
"for",
"icol",
"in",
"range",
"(",
"data",
".",
"ncols",
")",
":",
"feature_type_strings",
"[",
"icol",
"]",
"=",
"ctypes",
".",
"c_char_p",
"(",
"data",
".",
"stypes",
"[",
"icol",
"]",
".",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"handle",
"=",
"ctypes",
".",
"c_void_p",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGDMatrixCreateFromDT",
"(",
"ptrs",
",",
"feature_type_strings",
",",
"c_bst_ulong",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
",",
"c_bst_ulong",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
",",
"ctypes",
".",
"byref",
"(",
"handle",
")",
",",
"nthread",
")",
")",
"self",
".",
"handle",
"=",
"handle"
] |
Initialize data from a datatable Frame.
|
[
"Initialize",
"data",
"from",
"a",
"datatable",
"Frame",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L498-L527
|
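Callers never invoke _init_from_dt directly; DMatrix dispatches to it when handed a datatable Frame. A sketch, assuming the optional datatable package is installed in a version this xgboost build recognizes:

import datatable as dt
import xgboost as xgb

frame = dt.Frame({'a': [1.0, 2.0, 3.0], 'b': [0.5, 0.25, 0.125]})
dmat = xgb.DMatrix(frame)                # routed through _init_from_dt
print(dmat.num_row(), dmat.num_col())    # 3 2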
21,586
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
DMatrix.set_float_info_npy2d
|
def set_float_info_npy2d(self, field, data):
"""Set float type property into the DMatrix
for numpy 2d array input
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
if getattr(data, 'base', None) is not None and \
data.base is not None and isinstance(data, np.ndarray) \
and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
"because it will generate extra copies and increase memory consumption")
data = np.array(data, copy=True, dtype=np.float32)
else:
data = np.array(data, copy=False, dtype=np.float32)
c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
c_str(field),
c_data,
c_bst_ulong(len(data))))
|
python
|
def set_float_info_npy2d(self, field, data):
"""Set float type property into the DMatrix
for numpy 2d array input
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
if getattr(data, 'base', None) is not None and \
data.base is not None and isinstance(data, np.ndarray) \
and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
"because it will generate extra copies and increase memory consumption")
data = np.array(data, copy=True, dtype=np.float32)
else:
data = np.array(data, copy=False, dtype=np.float32)
c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
c_str(field),
c_data,
c_bst_ulong(len(data))))
|
[
"def",
"set_float_info_npy2d",
"(",
"self",
",",
"field",
",",
"data",
")",
":",
"if",
"getattr",
"(",
"data",
",",
"'base'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"data",
".",
"base",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"isinstance",
"(",
"data",
".",
"base",
",",
"np",
".",
"ndarray",
")",
"and",
"(",
"not",
"data",
".",
"flags",
".",
"c_contiguous",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Use subset (sliced data) of np.ndarray is not recommended \"",
"+",
"\"because it will generate extra copies and increase memory consumption\"",
")",
"data",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"else",
":",
"data",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"False",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"c_data",
"=",
"data",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_float",
")",
")",
"_check_call",
"(",
"_LIB",
".",
"XGDMatrixSetFloatInfo",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"field",
")",
",",
"c_data",
",",
"c_bst_ulong",
"(",
"len",
"(",
"data",
")",
")",
")",
")"
] |
Set float type property into the DMatrix
for numpy 2d array input
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
|
[
"Set",
"float",
"type",
"property",
"into",
"the",
"DMatrix",
"for",
"numpy",
"2d",
"array",
"input"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L598-L622
|
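Sketch of the warning path in the method above: a non-contiguous column slice of a 2-D array triggers the copy warning before the data is copied to float32. The 'label' field name is just an illustrative choice:

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(10, 4))
col = np.random.rand(10, 2)[:, 0:1]          # non-contiguous slice of a 2-D array
dtrain.set_float_info_npy2d('label', col)    # warns, then copies to float32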
21,587
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.load_rabit_checkpoint
|
def load_rabit_checkpoint(self):
"""Initialize the model by load from rabit checkpoint.
Returns
-------
version: integer
The version number of the model.
"""
version = ctypes.c_int()
_check_call(_LIB.XGBoosterLoadRabitCheckpoint(
self.handle, ctypes.byref(version)))
return version.value
|
python
|
def load_rabit_checkpoint(self):
"""Initialize the model by load from rabit checkpoint.
Returns
-------
version: integer
The version number of the model.
"""
version = ctypes.c_int()
_check_call(_LIB.XGBoosterLoadRabitCheckpoint(
self.handle, ctypes.byref(version)))
return version.value
|
[
"def",
"load_rabit_checkpoint",
"(",
"self",
")",
":",
"version",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterLoadRabitCheckpoint",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"version",
")",
")",
")",
"return",
"version",
".",
"value"
] |
Initialize the model by loading from a rabit checkpoint.
Returns
-------
version: integer
The version number of the model.
|
[
"Initialize",
"the",
"model",
"by",
"load",
"from",
"rabit",
"checkpoint",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1002-L1013
|
21,588
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.attr
|
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
The attribute value of the key; returns None if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
_check_call(_LIB.XGBoosterGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
return None
|
python
|
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
The attribute value of the key; returns None if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
_check_call(_LIB.XGBoosterGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
return None
|
[
"def",
"attr",
"(",
"self",
",",
"key",
")",
":",
"ret",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"success",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterGetAttr",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"key",
")",
",",
"ctypes",
".",
"byref",
"(",
"ret",
")",
",",
"ctypes",
".",
"byref",
"(",
"success",
")",
")",
")",
"if",
"success",
".",
"value",
"!=",
"0",
":",
"return",
"py_str",
"(",
"ret",
".",
"value",
")",
"return",
"None"
] |
Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
The attribute value of the key; returns None if the attribute does not exist.
|
[
"Get",
"attribute",
"string",
"from",
"the",
"Booster",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1019-L1038
|
21,589
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.attributes
|
def attributes(self):
"""Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there are no attributes.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(_LIB.XGBoosterGetAttrNames(self.handle,
ctypes.byref(length),
ctypes.byref(sarr)))
attr_names = from_cstr_to_pystr(sarr, length)
return {n: self.attr(n) for n in attr_names}
|
python
|
def attributes(self):
"""Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there are no attributes.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(_LIB.XGBoosterGetAttrNames(self.handle,
ctypes.byref(length),
ctypes.byref(sarr)))
attr_names = from_cstr_to_pystr(sarr, length)
return {n: self.attr(n) for n in attr_names}
|
[
"def",
"attributes",
"(",
"self",
")",
":",
"length",
"=",
"c_bst_ulong",
"(",
")",
"sarr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterGetAttrNames",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"length",
")",
",",
"ctypes",
".",
"byref",
"(",
"sarr",
")",
")",
")",
"attr_names",
"=",
"from_cstr_to_pystr",
"(",
"sarr",
",",
"length",
")",
"return",
"{",
"n",
":",
"self",
".",
"attr",
"(",
"n",
")",
"for",
"n",
"in",
"attr_names",
"}"
] |
Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there are no attributes.
|
[
"Get",
"attributes",
"stored",
"in",
"the",
"Booster",
"as",
"a",
"dictionary",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1040-L1054
|
21,590
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.set_attr
|
def set_attr(self, **kwargs):
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value))
|
python
|
def set_attr(self, **kwargs):
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value))
|
[
"def",
"set_attr",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"STRING_TYPES",
")",
":",
"raise",
"ValueError",
"(",
"\"Set Attr only accepts string values\"",
")",
"value",
"=",
"c_str",
"(",
"str",
"(",
"value",
")",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterSetAttr",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"key",
")",
",",
"value",
")",
")"
] |
Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
|
[
"Set",
"the",
"attribute",
"of",
"the",
"Booster",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1056-L1070
|
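A roundtrip sketch over the three attribute methods above (attr, attributes, set_attr); note that values must already be strings, since set_attr rejects anything else:

import xgboost as xgb

bst = xgb.Booster()
bst.set_attr(best_score='0.123', note='demo')
print(bst.attr('best_score'))   # '0.123'
print(bst.attributes())         # {'best_score': '0.123', 'note': 'demo'}
bst.set_attr(note=None)         # setting a value to None deletes the attribute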
21,591
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.set_param
|
def set_param(self, params, value=None):
"""Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of (key, value) pairs, dict of key to value, or simply a str key
value: optional
value of the specified parameter, when params is a str key
"""
if isinstance(params, Mapping):
params = params.items()
elif isinstance(params, STRING_TYPES) and value is not None:
params = [(params, value)]
for key, val in params:
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
|
python
|
def set_param(self, params, value=None):
"""Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of (key, value) pairs, dict of key to value, or simply a str key
value: optional
value of the specified parameter, when params is a str key
"""
if isinstance(params, Mapping):
params = params.items()
elif isinstance(params, STRING_TYPES) and value is not None:
params = [(params, value)]
for key, val in params:
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
|
[
"def",
"set_param",
"(",
"self",
",",
"params",
",",
"value",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"params",
",",
"Mapping",
")",
":",
"params",
"=",
"params",
".",
"items",
"(",
")",
"elif",
"isinstance",
"(",
"params",
",",
"STRING_TYPES",
")",
"and",
"value",
"is",
"not",
"None",
":",
"params",
"=",
"[",
"(",
"params",
",",
"value",
")",
"]",
"for",
"key",
",",
"val",
"in",
"params",
":",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterSetParam",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"key",
")",
",",
"c_str",
"(",
"str",
"(",
"val",
")",
")",
")",
")"
] |
Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of (key, value) pairs, dict of key to value, or simply a str key
value: optional
value of the specified parameter, when params is a str key
|
[
"Set",
"parameters",
"into",
"the",
"Booster",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1072-L1087
|
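Sketch of the three accepted parameter forms in set_param (every value is stringified via c_str(str(val)) before being handed to the C API, so mixed types are fine):

import xgboost as xgb

bst = xgb.Booster()
bst.set_param({'max_depth': 4, 'eta': 0.1})    # dict form
bst.set_param([('eta', 0.05), ('seed', 7)])    # list of (key, value) pairs
bst.set_param('eta', 0.01)                     # single str key with a value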
21,592
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.eval
|
def eval(self, data, name='eval', iteration=0):
"""Evaluate the model on mat.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
name : str, optional
The name of the dataset.
iteration : int, optional
The current iteration number.
Returns
-------
result: str
Evaluation result string.
"""
self._validate_features(data)
return self.eval_set([(data, name)], iteration)
|
python
|
def eval(self, data, name='eval', iteration=0):
"""Evaluate the model on mat.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
name : str, optional
The name of the dataset.
iteration : int, optional
The current iteration number.
Returns
-------
result: str
Evaluation result string.
"""
self._validate_features(data)
return self.eval_set([(data, name)], iteration)
|
[
"def",
"eval",
"(",
"self",
",",
"data",
",",
"name",
"=",
"'eval'",
",",
"iteration",
"=",
"0",
")",
":",
"self",
".",
"_validate_features",
"(",
"data",
")",
"return",
"self",
".",
"eval_set",
"(",
"[",
"(",
"data",
",",
"name",
")",
"]",
",",
"iteration",
")"
] |
Evaluate the model on the data.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
name : str, optional
The name of the dataset.
iteration : int, optional
The current iteration number.
Returns
-------
result: str
Evaluation result string.
|
[
"Evaluate",
"the",
"model",
"on",
"mat",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1185-L1205
|
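Usage sketch for eval; the exact metric in the returned string depends on the objective (the rmse shown in the comment assumes the regression default):

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(50, 3), label=np.random.rand(50))
bst = xgb.train({}, dtrain, num_boost_round=2)
print(bst.eval(dtrain, name='train', iteration=2))   # e.g. '[2]\ttrain-rmse:0.27'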
21,593
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.save_model
|
def save_model(self, fname):
"""
Save the model to a file.
The model is saved in an XGBoost internal binary format which is
universal among the various XGBoost interfaces. Auxiliary attributes of
the Python Booster object (such as feature_names) will not be saved.
To preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string
Output file name
"""
if isinstance(fname, STRING_TYPES): # assume file name
_check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname)))
else:
raise TypeError("fname must be a string")
|
python
|
def save_model(self, fname):
"""
Save the model to a file.
The model is saved in an XGBoost internal binary format which is
universal among the various XGBoost interfaces. Auxiliary attributes of
the Python Booster object (such as feature_names) will not be saved.
To preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string
Output file name
"""
if isinstance(fname, STRING_TYPES): # assume file name
_check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname)))
else:
raise TypeError("fname must be a string")
|
[
"def",
"save_model",
"(",
"self",
",",
"fname",
")",
":",
"if",
"isinstance",
"(",
"fname",
",",
"STRING_TYPES",
")",
":",
"# assume file name",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterSaveModel",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"fname",
")",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"fname must be a string\"",
")"
] |
Save the model to a file.
The model is saved in an XGBoost internal binary format which is
universal among the various XGBoost interfaces. Auxiliary attributes of
the Python Booster object (such as feature_names) will not be saved.
To preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string
Output file name
|
[
"Save",
"the",
"model",
"to",
"a",
"file",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1316-L1333
|
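Save/reload roundtrip sketch; as the docstring warns, Python-side attributes such as feature_names do not survive the binary format (the file name is arbitrary):

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(20, 3), label=np.random.rand(20))
bst = xgb.train({}, dtrain, num_boost_round=2)

bst.save_model('model.bin')                   # XGBoost-internal binary format
bst2 = xgb.Booster(model_file='model.bin')    # reload into a fresh Booster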
21,594
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.dump_model
|
def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"):
"""
Dump model into a text or JSON file.
Parameters
----------
fout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump file. Can be 'text' or 'json'.
"""
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats, dump_format)
if dump_format == 'json':
fout.write('[\n')
for i, _ in enumerate(ret):
fout.write(ret[i])
if i < len(ret) - 1:
fout.write(",\n")
fout.write('\n]')
else:
for i, _ in enumerate(ret):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close()
|
python
|
def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"):
"""
Dump model into a text or JSON file.
Parameters
----------
fout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump file. Can be 'text' or 'json'.
"""
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats, dump_format)
if dump_format == 'json':
fout.write('[\n')
for i, _ in enumerate(ret):
fout.write(ret[i])
if i < len(ret) - 1:
fout.write(",\n")
fout.write('\n]')
else:
for i, _ in enumerate(ret):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close()
|
[
"def",
"dump_model",
"(",
"self",
",",
"fout",
",",
"fmap",
"=",
"''",
",",
"with_stats",
"=",
"False",
",",
"dump_format",
"=",
"\"text\"",
")",
":",
"if",
"isinstance",
"(",
"fout",
",",
"STRING_TYPES",
")",
":",
"fout",
"=",
"open",
"(",
"fout",
",",
"'w'",
")",
"need_close",
"=",
"True",
"else",
":",
"need_close",
"=",
"False",
"ret",
"=",
"self",
".",
"get_dump",
"(",
"fmap",
",",
"with_stats",
",",
"dump_format",
")",
"if",
"dump_format",
"==",
"'json'",
":",
"fout",
".",
"write",
"(",
"'[\\n'",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"ret",
")",
":",
"fout",
".",
"write",
"(",
"ret",
"[",
"i",
"]",
")",
"if",
"i",
"<",
"len",
"(",
"ret",
")",
"-",
"1",
":",
"fout",
".",
"write",
"(",
"\",\\n\"",
")",
"fout",
".",
"write",
"(",
"'\\n]'",
")",
"else",
":",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"ret",
")",
":",
"fout",
".",
"write",
"(",
"'booster[{}]:\\n'",
".",
"format",
"(",
"i",
")",
")",
"fout",
".",
"write",
"(",
"ret",
"[",
"i",
"]",
")",
"if",
"need_close",
":",
"fout",
".",
"close",
"(",
")"
] |
Dump model into a text or JSON file.
Parameters
----------
fout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump file. Can be 'text' or 'json'.
|
[
"Dump",
"model",
"into",
"a",
"text",
"or",
"JSON",
"file",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1373-L1406
|
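Sketch of a JSON dump to disk with dump_model (the file name is arbitrary); a file object can be passed instead of a name, in which case the caller keeps responsibility for closing it:

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(20, 3), label=np.random.rand(20))
bst = xgb.train({}, dtrain, num_boost_round=2)

bst.dump_model('dump.json', with_stats=True, dump_format='json')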
21,595
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.get_dump
|
def get_dump(self, fmap='', with_stats=False, dump_format="text"):
"""
Returns the model dump as a list of strings.
Parameters
----------
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump. Can be 'text' or 'json'.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = len(self.feature_names)
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelExWithFeatures(
self.handle,
ctypes.c_int(flen),
fname,
ftype,
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModelEx(self.handle,
c_str(fmap),
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res
|
python
|
def get_dump(self, fmap='', with_stats=False, dump_format="text"):
"""
Returns the model dump as a list of strings.
Parameters
----------
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump. Can be 'text' or 'json'.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = len(self.feature_names)
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelExWithFeatures(
self.handle,
ctypes.c_int(flen),
fname,
ftype,
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModelEx(self.handle,
c_str(fmap),
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res
|
[
"def",
"get_dump",
"(",
"self",
",",
"fmap",
"=",
"''",
",",
"with_stats",
"=",
"False",
",",
"dump_format",
"=",
"\"text\"",
")",
":",
"length",
"=",
"c_bst_ulong",
"(",
")",
"sarr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"if",
"self",
".",
"feature_names",
"is",
"not",
"None",
"and",
"fmap",
"==",
"''",
":",
"flen",
"=",
"len",
"(",
"self",
".",
"feature_names",
")",
"fname",
"=",
"from_pystr_to_cstr",
"(",
"self",
".",
"feature_names",
")",
"if",
"self",
".",
"feature_types",
"is",
"None",
":",
"# use quantitative as default",
"# {'q': quantitative, 'i': indicator}",
"ftype",
"=",
"from_pystr_to_cstr",
"(",
"[",
"'q'",
"]",
"*",
"flen",
")",
"else",
":",
"ftype",
"=",
"from_pystr_to_cstr",
"(",
"self",
".",
"feature_types",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterDumpModelExWithFeatures",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"flen",
")",
",",
"fname",
",",
"ftype",
",",
"ctypes",
".",
"c_int",
"(",
"with_stats",
")",
",",
"c_str",
"(",
"dump_format",
")",
",",
"ctypes",
".",
"byref",
"(",
"length",
")",
",",
"ctypes",
".",
"byref",
"(",
"sarr",
")",
")",
")",
"else",
":",
"if",
"fmap",
"!=",
"''",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fmap",
")",
":",
"raise",
"ValueError",
"(",
"\"No such file: {0}\"",
".",
"format",
"(",
"fmap",
")",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterDumpModelEx",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"fmap",
")",
",",
"ctypes",
".",
"c_int",
"(",
"with_stats",
")",
",",
"c_str",
"(",
"dump_format",
")",
",",
"ctypes",
".",
"byref",
"(",
"length",
")",
",",
"ctypes",
".",
"byref",
"(",
"sarr",
")",
")",
")",
"res",
"=",
"from_cstr_to_pystr",
"(",
"sarr",
",",
"length",
")",
"return",
"res"
] |
Returns the model dump as a list of strings.
Parameters
----------
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump. Can be 'text' or 'json'.
|
[
"Returns",
"the",
"model",
"dump",
"as",
"a",
"list",
"of",
"strings",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1408-L1453
|
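Sketch: unlike dump_model, get_dump returns the per-tree dump strings in memory rather than writing them to a file:

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(20, 3), label=np.random.rand(20))
bst = xgb.train({}, dtrain, num_boost_round=2)

trees = bst.get_dump(with_stats=True)
print(len(trees))        # one dump string per boosted tree
print(trees[0][:80])     # text like '0:[f0<0.4...] yes=1,no=2,...'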
21,596
|
dmlc/xgboost
|
python-package/xgboost/core.py
|
Booster.get_split_value_histogram
|
def get_split_value_histogram(self, feature, fmap='', bins=None, as_pandas=True):
"""Get split value histogram of a feature
Parameters
----------
feature: str
The name of the feature.
fmap: str (optional)
The name of feature map file.
bins: int, default None
The maximum number of bins.
Number of bins equals number of unique split values n_unique,
if bins == None or bins > n_unique.
as_pandas: bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
"""
xgdump = self.get_dump(fmap=fmap)
values = []
regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature))
for i, _ in enumerate(xgdump):
m = re.findall(regexp, xgdump[i])
values.extend([float(x) for x in m])
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
nph = np.histogram(values, bins=bins)
nph = np.column_stack((nph[1][1:], nph[0]))
nph = nph[nph[:, 1] > 0]
if as_pandas and PANDAS_INSTALLED:
return DataFrame(nph, columns=['SplitValue', 'Count'])
if as_pandas and not PANDAS_INSTALLED:
sys.stderr.write(
"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).")
return nph
|
python
|
def get_split_value_histogram(self, feature, fmap='', bins=None, as_pandas=True):
"""Get split value histogram of a feature
Parameters
----------
feature: str
The name of the feature.
fmap: str (optional)
The name of feature map file.
bins: int, default None
The maximum number of bins.
Number of bins equals number of unique split values n_unique,
if bins == None or bins > n_unique.
as_pandas: bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
"""
xgdump = self.get_dump(fmap=fmap)
values = []
regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature))
for i, _ in enumerate(xgdump):
m = re.findall(regexp, xgdump[i])
values.extend([float(x) for x in m])
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
nph = np.histogram(values, bins=bins)
nph = np.column_stack((nph[1][1:], nph[0]))
nph = nph[nph[:, 1] > 0]
if as_pandas and PANDAS_INSTALLED:
return DataFrame(nph, columns=['SplitValue', 'Count'])
if as_pandas and not PANDAS_INSTALLED:
sys.stderr.write(
"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).")
return nph
|
[
"def",
"get_split_value_histogram",
"(",
"self",
",",
"feature",
",",
"fmap",
"=",
"''",
",",
"bins",
"=",
"None",
",",
"as_pandas",
"=",
"True",
")",
":",
"xgdump",
"=",
"self",
".",
"get_dump",
"(",
"fmap",
"=",
"fmap",
")",
"values",
"=",
"[",
"]",
"regexp",
"=",
"re",
".",
"compile",
"(",
"r\"\\[{0}<([\\d.Ee+-]+)\\]\"",
".",
"format",
"(",
"feature",
")",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"xgdump",
")",
":",
"m",
"=",
"re",
".",
"findall",
"(",
"regexp",
",",
"xgdump",
"[",
"i",
"]",
")",
"values",
".",
"extend",
"(",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"m",
"]",
")",
"n_unique",
"=",
"len",
"(",
"np",
".",
"unique",
"(",
"values",
")",
")",
"bins",
"=",
"max",
"(",
"min",
"(",
"n_unique",
",",
"bins",
")",
"if",
"bins",
"is",
"not",
"None",
"else",
"n_unique",
",",
"1",
")",
"nph",
"=",
"np",
".",
"histogram",
"(",
"values",
",",
"bins",
"=",
"bins",
")",
"nph",
"=",
"np",
".",
"column_stack",
"(",
"(",
"nph",
"[",
"1",
"]",
"[",
"1",
":",
"]",
",",
"nph",
"[",
"0",
"]",
")",
")",
"nph",
"=",
"nph",
"[",
"nph",
"[",
":",
",",
"1",
"]",
">",
"0",
"]",
"if",
"as_pandas",
"and",
"PANDAS_INSTALLED",
":",
"return",
"DataFrame",
"(",
"nph",
",",
"columns",
"=",
"[",
"'SplitValue'",
",",
"'Count'",
"]",
")",
"if",
"as_pandas",
"and",
"not",
"PANDAS_INSTALLED",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).\"",
")",
"return",
"nph"
] |
Get split value histogram of a feature
Parameters
----------
feature: str
The name of the feature.
fmap: str (optional)
The name of feature map file.
bins: int, default None
The maximum number of bins.
Number of bins equals number of unique split values n_unique,
if bins == None or bins > n_unique.
as_pandas: bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
|
[
"Get",
"split",
"value",
"histogram",
"of",
"a",
"feature"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1692-L1733
|
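A short sketch of the histogram API above (editorial addition; toy data is hypothetical, and the queried feature name must match one used in the model dump):

import numpy as np
import xgboost as xgb

# Hypothetical regression data with named features.
X = np.random.rand(200, 3)
y = 2.0 * X[:, 0] + np.random.rand(200)
dtrain = xgb.DMatrix(X, label=y, feature_names=['a', 'b', 'c'])
bst = xgb.train({'max_depth': 3}, dtrain, num_boost_round=5)

# Histogram of the split thresholds the trees chose for feature 'a'.
hist = bst.get_split_value_histogram('a', bins=10)
print(hist)  # DataFrame(SplitValue, Count) if pandas is installed, else ndarray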
21,597
|
dmlc/xgboost
|
python-package/xgboost/plotting.py
|
plot_importance
|
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='F score', ylabel='Features',
importance_type='weight', max_num_features=None,
grid=True, show_values=True, **kwargs):
"""Plot importance based on fitted trees.
Parameters
----------
booster : Booster, XGBModel or dict
Booster or XGBModel instance, or dict taken by Booster.get_fscore()
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
grid : bool, default True
Turn the axes grids on or off.
importance_type : str, default "weight"
How the importance is calculated: either "weight", "gain", or "cover"
* "weight" is the number of times a feature appears in a tree
* "gain" is the average gain of splits which use the feature
* "cover" is the average coverage of splits which use the feature
where coverage is defined as the number of samples affected by the split
max_num_features : int, default None
Maximum number of top features displayed on plot. If None, all features will be displayed.
height : float, default 0.2
Bar height, passed to ax.barh()
xlim : tuple, default None
Tuple passed to axes.xlim()
ylim : tuple, default None
Tuple passed to axes.ylim()
title : str, default "Feature importance"
Axes title. To disable, pass None.
xlabel : str, default "F score"
X axis title label. To disable, pass None.
ylabel : str, default "Features"
Y axis title label. To disable, pass None.
show_values : bool, default True
Show values on plot. To disable, pass False.
kwargs :
Other keywords passed to ax.barh()
Returns
-------
ax : matplotlib Axes
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('You must install matplotlib to plot importance')
if isinstance(booster, XGBModel):
importance = booster.get_booster().get_score(importance_type=importance_type)
elif isinstance(booster, Booster):
importance = booster.get_score(importance_type=importance_type)
elif isinstance(booster, dict):
importance = booster
else:
raise ValueError('tree must be Booster, XGBModel or dict instance')
if not importance:
raise ValueError('Booster.get_score() results in empty')
tuples = [(k, importance[k]) for k in importance]
if max_num_features is not None:
# pylint: disable=invalid-unary-operand-type
tuples = sorted(tuples, key=lambda x: x[1])[-max_num_features:]
else:
tuples = sorted(tuples, key=lambda x: x[1])
labels, values = zip(*tuples)
if ax is None:
_, ax = plt.subplots(1, 1)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
if show_values is True:
for x, y in zip(values, ylocs):
ax.text(x + 1, y, x, va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
if not isinstance(xlim, tuple) or len(xlim) != 2:
raise ValueError('xlim must be a tuple of 2 elements')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, tuple) or len(ylim) != 2:
raise ValueError('ylim must be a tuple of 2 elements')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
|
python
|
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='F score', ylabel='Features',
importance_type='weight', max_num_features=None,
grid=True, show_values=True, **kwargs):
"""Plot importance based on fitted trees.
Parameters
----------
booster : Booster, XGBModel or dict
Booster or XGBModel instance, or dict taken by Booster.get_fscore()
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
grid : bool, default True
Turn the axes grids on or off.
importance_type : str, default "weight"
How the importance is calculated: either "weight", "gain", or "cover"
* "weight" is the number of times a feature appears in a tree
* "gain" is the average gain of splits which use the feature
* "cover" is the average coverage of splits which use the feature
where coverage is defined as the number of samples affected by the split
max_num_features : int, default None
Maximum number of top features displayed on plot. If None, all features will be displayed.
height : float, default 0.2
Bar height, passed to ax.barh()
xlim : tuple, default None
Tuple passed to axes.xlim()
ylim : tuple, default None
Tuple passed to axes.ylim()
title : str, default "Feature importance"
Axes title. To disable, pass None.
xlabel : str, default "F score"
X axis title label. To disable, pass None.
ylabel : str, default "Features"
Y axis title label. To disable, pass None.
show_values : bool, default True
Show values on plot. To disable, pass False.
kwargs :
Other keywords passed to ax.barh()
Returns
-------
ax : matplotlib Axes
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('You must install matplotlib to plot importance')
if isinstance(booster, XGBModel):
importance = booster.get_booster().get_score(importance_type=importance_type)
elif isinstance(booster, Booster):
importance = booster.get_score(importance_type=importance_type)
elif isinstance(booster, dict):
importance = booster
else:
raise ValueError('tree must be Booster, XGBModel or dict instance')
if not importance:
raise ValueError('Booster.get_score() results in empty')
tuples = [(k, importance[k]) for k in importance]
if max_num_features is not None:
# pylint: disable=invalid-unary-operand-type
tuples = sorted(tuples, key=lambda x: x[1])[-max_num_features:]
else:
tuples = sorted(tuples, key=lambda x: x[1])
labels, values = zip(*tuples)
if ax is None:
_, ax = plt.subplots(1, 1)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
if show_values is True:
for x, y in zip(values, ylocs):
ax.text(x + 1, y, x, va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
if not isinstance(xlim, tuple) or len(xlim) != 2:
raise ValueError('xlim must be a tuple of 2 elements')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, tuple) or len(ylim) != 2:
raise ValueError('ylim must be a tuple of 2 elements')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
|
[
"def",
"plot_importance",
"(",
"booster",
",",
"ax",
"=",
"None",
",",
"height",
"=",
"0.2",
",",
"xlim",
"=",
"None",
",",
"ylim",
"=",
"None",
",",
"title",
"=",
"'Feature importance'",
",",
"xlabel",
"=",
"'F score'",
",",
"ylabel",
"=",
"'Features'",
",",
"importance_type",
"=",
"'weight'",
",",
"max_num_features",
"=",
"None",
",",
"grid",
"=",
"True",
",",
"show_values",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'You must install matplotlib to plot importance'",
")",
"if",
"isinstance",
"(",
"booster",
",",
"XGBModel",
")",
":",
"importance",
"=",
"booster",
".",
"get_booster",
"(",
")",
".",
"get_score",
"(",
"importance_type",
"=",
"importance_type",
")",
"elif",
"isinstance",
"(",
"booster",
",",
"Booster",
")",
":",
"importance",
"=",
"booster",
".",
"get_score",
"(",
"importance_type",
"=",
"importance_type",
")",
"elif",
"isinstance",
"(",
"booster",
",",
"dict",
")",
":",
"importance",
"=",
"booster",
"else",
":",
"raise",
"ValueError",
"(",
"'tree must be Booster, XGBModel or dict instance'",
")",
"if",
"not",
"importance",
":",
"raise",
"ValueError",
"(",
"'Booster.get_score() results in empty'",
")",
"tuples",
"=",
"[",
"(",
"k",
",",
"importance",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"importance",
"]",
"if",
"max_num_features",
"is",
"not",
"None",
":",
"# pylint: disable=invalid-unary-operand-type",
"tuples",
"=",
"sorted",
"(",
"tuples",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"[",
"-",
"max_num_features",
":",
"]",
"else",
":",
"tuples",
"=",
"sorted",
"(",
"tuples",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"labels",
",",
"values",
"=",
"zip",
"(",
"*",
"tuples",
")",
"if",
"ax",
"is",
"None",
":",
"_",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
")",
"ylocs",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"values",
")",
")",
"ax",
".",
"barh",
"(",
"ylocs",
",",
"values",
",",
"align",
"=",
"'center'",
",",
"height",
"=",
"height",
",",
"*",
"*",
"kwargs",
")",
"if",
"show_values",
"is",
"True",
":",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"values",
",",
"ylocs",
")",
":",
"ax",
".",
"text",
"(",
"x",
"+",
"1",
",",
"y",
",",
"x",
",",
"va",
"=",
"'center'",
")",
"ax",
".",
"set_yticks",
"(",
"ylocs",
")",
"ax",
".",
"set_yticklabels",
"(",
"labels",
")",
"if",
"xlim",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"xlim",
",",
"tuple",
")",
"or",
"len",
"(",
"xlim",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'xlim must be a tuple of 2 elements'",
")",
"else",
":",
"xlim",
"=",
"(",
"0",
",",
"max",
"(",
"values",
")",
"*",
"1.1",
")",
"ax",
".",
"set_xlim",
"(",
"xlim",
")",
"if",
"ylim",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"ylim",
",",
"tuple",
")",
"or",
"len",
"(",
"ylim",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'ylim must be a tuple of 2 elements'",
")",
"else",
":",
"ylim",
"=",
"(",
"-",
"1",
",",
"len",
"(",
"values",
")",
")",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"if",
"title",
"is",
"not",
"None",
":",
"ax",
".",
"set_title",
"(",
"title",
")",
"if",
"xlabel",
"is",
"not",
"None",
":",
"ax",
".",
"set_xlabel",
"(",
"xlabel",
")",
"if",
"ylabel",
"is",
"not",
"None",
":",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"ax",
".",
"grid",
"(",
"grid",
")",
"return",
"ax"
] |
Plot importance based on fitted trees.
Parameters
----------
booster : Booster, XGBModel or dict
Booster or XGBModel instance, or dict taken by Booster.get_fscore()
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
grid : bool, default True
Turn the axes grids on or off.
importance_type : str, default "weight"
How the importance is calculated: either "weight", "gain", or "cover"
* "weight" is the number of times a feature appears in a tree
* "gain" is the average gain of splits which use the feature
* "cover" is the average coverage of splits which use the feature
where coverage is defined as the number of samples affected by the split
max_num_features : int, default None
Maximum number of top features displayed on plot. If None, all features will be displayed.
height : float, default 0.2
Bar height, passed to ax.barh()
xlim : tuple, default None
Tuple passed to axes.xlim()
ylim : tuple, default None
Tuple passed to axes.ylim()
title : str, default "Feature importance"
Axes title. To disable, pass None.
xlabel : str, default "F score"
X axis title label. To disable, pass None.
ylabel : str, default "Features"
Y axis title label. To disable, pass None.
show_values : bool, default True
Show values on plot. To disable, pass False.
kwargs :
Other keywords passed to ax.barh()
Returns
-------
ax : matplotlib Axes
|
[
"Plot",
"importance",
"based",
"on",
"fitted",
"trees",
"."
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/plotting.py#L14-L117
|
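A hedged usage sketch for plot_importance (editorial addition; the toy data is hypothetical and matplotlib must be installed):

import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt

X = np.random.rand(100, 5)
y = np.random.randint(2, size=100)
bst = xgb.train({'objective': 'binary:logistic'},
                xgb.DMatrix(X, label=y), num_boost_round=10)

# 'gain' often ranks features more informatively than the default 'weight'.
ax = xgb.plot_importance(bst, importance_type='gain',
                         max_num_features=5, show_values=False)
plt.show()

Passing a dict (e.g. the result of Booster.get_fscore()) instead of a model plots precomputed scores directly.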
21,598
|
dmlc/xgboost
|
python-package/xgboost/plotting.py
|
_parse_edge
|
def _parse_edge(graph, node, text, yes_color='#0000FF', no_color='#FF0000'):
"""parse dumped edge"""
try:
match = _EDGEPAT.match(text)
if match is not None:
yes, no, missing = match.groups()
if yes == missing:
graph.edge(node, yes, label='yes, missing', color=yes_color)
graph.edge(node, no, label='no', color=no_color)
else:
graph.edge(node, yes, label='yes', color=yes_color)
graph.edge(node, no, label='no, missing', color=no_color)
return
except ValueError:
pass
match = _EDGEPAT2.match(text)
if match is not None:
yes, no = match.groups()
graph.edge(node, yes, label='yes', color=yes_color)
graph.edge(node, no, label='no', color=no_color)
return
raise ValueError('Unable to parse edge: {0}'.format(text))
|
python
|
def _parse_edge(graph, node, text, yes_color='#0000FF', no_color='#FF0000'):
"""parse dumped edge"""
try:
match = _EDGEPAT.match(text)
if match is not None:
yes, no, missing = match.groups()
if yes == missing:
graph.edge(node, yes, label='yes, missing', color=yes_color)
graph.edge(node, no, label='no', color=no_color)
else:
graph.edge(node, yes, label='yes', color=yes_color)
graph.edge(node, no, label='no, missing', color=no_color)
return
except ValueError:
pass
match = _EDGEPAT2.match(text)
if match is not None:
yes, no = match.groups()
graph.edge(node, yes, label='yes', color=yes_color)
graph.edge(node, no, label='no', color=no_color)
return
raise ValueError('Unable to parse edge: {0}'.format(text))
|
[
"def",
"_parse_edge",
"(",
"graph",
",",
"node",
",",
"text",
",",
"yes_color",
"=",
"'#0000FF'",
",",
"no_color",
"=",
"'#FF0000'",
")",
":",
"try",
":",
"match",
"=",
"_EDGEPAT",
".",
"match",
"(",
"text",
")",
"if",
"match",
"is",
"not",
"None",
":",
"yes",
",",
"no",
",",
"missing",
"=",
"match",
".",
"groups",
"(",
")",
"if",
"yes",
"==",
"missing",
":",
"graph",
".",
"edge",
"(",
"node",
",",
"yes",
",",
"label",
"=",
"'yes, missing'",
",",
"color",
"=",
"yes_color",
")",
"graph",
".",
"edge",
"(",
"node",
",",
"no",
",",
"label",
"=",
"'no'",
",",
"color",
"=",
"no_color",
")",
"else",
":",
"graph",
".",
"edge",
"(",
"node",
",",
"yes",
",",
"label",
"=",
"'yes'",
",",
"color",
"=",
"yes_color",
")",
"graph",
".",
"edge",
"(",
"node",
",",
"no",
",",
"label",
"=",
"'no, missing'",
",",
"color",
"=",
"no_color",
")",
"return",
"except",
"ValueError",
":",
"pass",
"match",
"=",
"_EDGEPAT2",
".",
"match",
"(",
"text",
")",
"if",
"match",
"is",
"not",
"None",
":",
"yes",
",",
"no",
"=",
"match",
".",
"groups",
"(",
")",
"graph",
".",
"edge",
"(",
"node",
",",
"yes",
",",
"label",
"=",
"'yes'",
",",
"color",
"=",
"yes_color",
")",
"graph",
".",
"edge",
"(",
"node",
",",
"no",
",",
"label",
"=",
"'no'",
",",
"color",
"=",
"no_color",
")",
"return",
"raise",
"ValueError",
"(",
"'Unable to parse edge: {0}'",
".",
"format",
"(",
"text",
")",
")"
] |
parse dumped edge
|
[
"parse",
"dumped",
"edge"
] |
253fdd8a42d5ec6b819788199584d27bf9ea6253
|
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/plotting.py#L141-L162
|
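For context, a standalone sketch of the kind of dump line this helper parses (editorial addition; the real patterns live in _EDGEPAT/_EDGEPAT2 in the same module, and the regex below is an approximation, not the library's):

import re

# Approximate edge pattern: branch node ids for the yes/no/missing children.
EDGEPAT = re.compile(r'yes=(\d+),no=(\d+),missing=(\d+)')

line = '0:[f0<0.5] yes=1,no=2,missing=1'
match = EDGEPAT.search(line)
if match is not None:
    yes, no, missing = match.groups()
    # When the missing branch coincides with 'yes', the edge is labeled 'yes, missing'.
    label = 'yes, missing' if yes == missing else 'yes'
    print('edge 0 -> {} ({})'.format(yes, label))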
21,599
|
tzutalin/labelImg
|
libs/utils.py
|
newAction
|
def newAction(parent, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, enabled=True):
"""Create a new action and assign callbacks, shortcuts, etc."""
a = QAction(text, parent)
if icon is not None:
a.setIcon(newIcon(icon))
if shortcut is not None:
if isinstance(shortcut, (list, tuple)):
a.setShortcuts(shortcut)
else:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setCheckable(True)
a.setEnabled(enabled)
return a
|
python
|
def newAction(parent, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, enabled=True):
"""Create a new action and assign callbacks, shortcuts, etc."""
a = QAction(text, parent)
if icon is not None:
a.setIcon(newIcon(icon))
if shortcut is not None:
if isinstance(shortcut, (list, tuple)):
a.setShortcuts(shortcut)
else:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setCheckable(True)
a.setEnabled(enabled)
return a
|
[
"def",
"newAction",
"(",
"parent",
",",
"text",
",",
"slot",
"=",
"None",
",",
"shortcut",
"=",
"None",
",",
"icon",
"=",
"None",
",",
"tip",
"=",
"None",
",",
"checkable",
"=",
"False",
",",
"enabled",
"=",
"True",
")",
":",
"a",
"=",
"QAction",
"(",
"text",
",",
"parent",
")",
"if",
"icon",
"is",
"not",
"None",
":",
"a",
".",
"setIcon",
"(",
"newIcon",
"(",
"icon",
")",
")",
"if",
"shortcut",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"shortcut",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"a",
".",
"setShortcuts",
"(",
"shortcut",
")",
"else",
":",
"a",
".",
"setShortcut",
"(",
"shortcut",
")",
"if",
"tip",
"is",
"not",
"None",
":",
"a",
".",
"setToolTip",
"(",
"tip",
")",
"a",
".",
"setStatusTip",
"(",
"tip",
")",
"if",
"slot",
"is",
"not",
"None",
":",
"a",
".",
"triggered",
".",
"connect",
"(",
"slot",
")",
"if",
"checkable",
":",
"a",
".",
"setCheckable",
"(",
"True",
")",
"a",
".",
"setEnabled",
"(",
"enabled",
")",
"return",
"a"
] |
Create a new action and assign callbacks, shortcuts, etc.
|
[
"Create",
"a",
"new",
"action",
"and",
"assign",
"callbacks",
"shortcuts",
"etc",
"."
] |
6afd15aa88f89f41254e0004ed219b3965eb2c0d
|
https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/libs/utils.py#L29-L48
|
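A minimal usage sketch for newAction (editorial addition; assumes a PyQt5 environment, and the import path mirrors how the helper is laid out inside the labelImg repo):

from PyQt5.QtWidgets import QApplication, QMainWindow
from libs.utils import newAction  # path as used inside the labelImg repo

app = QApplication([])
win = QMainWindow()

# Action with a callback, a keyboard shortcut, and a status/tool tip.
quit_action = newAction(win, 'Quit', slot=app.quit,
                        shortcut='Ctrl+Q', tip='Quit the application')
win.addAction(quit_action)
win.show()
app.exec_()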