| partition (string, 3 classes) | func_name (string, 1–134 chars) | docstring (string, 1–46.9k chars) | path (string, 4–223 chars) | original_string (string, 75–104k chars) | code (string, 75–104k chars) | docstring_tokens (list, 1–1.97k items) | repo (string, 7–55 chars) | language (string, 1 class) | url (string, 87–315 chars) | code_tokens (list, 19–28.4k items) | sha (string, 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
Sections.get_sections_with_students_in_course
|
Return list of sections including students for the passed course ID.
|
uw_canvas/sections.py
|
def get_sections_with_students_in_course(self, course_id, params={}):
"""
Return list of sections including students for the passed course ID.
"""
include = params.get("include", [])
if "students" not in include:
include.append("students")
params["include"] = include
return self.get_sections_in_course(course_id, params)
|
def get_sections_with_students_in_course(self, course_id, params={}):
"""
Return list of sections including students for the passed course ID.
"""
include = params.get("include", [])
if "students" not in include:
include.append("students")
params["include"] = include
return self.get_sections_in_course(course_id, params)
|
[
"Return",
"list",
"of",
"sections",
"including",
"students",
"for",
"the",
"passed",
"course",
"ID",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sections.py#L46-L55
|
[
"def",
"get_sections_with_students_in_course",
"(",
"self",
",",
"course_id",
",",
"params",
"=",
"{",
"}",
")",
":",
"include",
"=",
"params",
".",
"get",
"(",
"\"include\"",
",",
"[",
"]",
")",
"if",
"\"students\"",
"not",
"in",
"include",
":",
"include",
".",
"append",
"(",
"\"students\"",
")",
"params",
"[",
"\"include\"",
"]",
"=",
"include",
"return",
"self",
".",
"get_sections_in_course",
"(",
"course_id",
",",
"params",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
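A note on the record above: `get_sections_with_students_in_course` uses a mutable default (`params={}`) and mutates whatever dict it receives, appending `"students"` to the `include` list in place. A minimal calling sketch; the `Sections` class name comes from the record's `func_name` cell, and the no-argument construction is an assumption:

```python
from uw_canvas.sections import Sections  # module path from the record's `path` cell

client = Sections()  # hypothetical construction; real setup is deployment-specific

# Pass a fresh dict on every call: the method appends "students" to
# params["include"] in place, so a shared (or default) dict accumulates
# state across calls.
params = {"include": ["total_students"]}
sections = client.get_sections_with_students_in_course(12345, params=params)
# params is now {"include": ["total_students", "students"]} -- mutated by the call.
```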
test
|
Sections.get_sections_with_students_in_course_by_sis_id
|
Return list of sections including students for the passed sis ID.
|
uw_canvas/sections.py
|
def get_sections_with_students_in_course_by_sis_id(self, sis_course_id,
params={}):
"""
Return list of sections including students for the passed sis ID.
"""
return self.get_sections_with_students_in_course(
self._sis_id(sis_course_id, sis_field="course"), params)
|
def get_sections_with_students_in_course_by_sis_id(self, sis_course_id,
params={}):
"""
Return list of sections including students for the passed sis ID.
"""
return self.get_sections_with_students_in_course(
self._sis_id(sis_course_id, sis_field="course"), params)
|
[
"Return",
"list",
"of",
"sections",
"including",
"students",
"for",
"the",
"passed",
"sis",
"ID",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sections.py#L57-L63
|
[
"def",
"get_sections_with_students_in_course_by_sis_id",
"(",
"self",
",",
"sis_course_id",
",",
"params",
"=",
"{",
"}",
")",
":",
"return",
"self",
".",
"get_sections_with_students_in_course",
"(",
"self",
".",
"_sis_id",
"(",
"sis_course_id",
",",
"sis_field",
"=",
"\"course\"",
")",
",",
"params",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Sections.create_section
|
Create a canvas section in the given course id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.create
|
uw_canvas/sections.py
|
def create_section(self, course_id, name, sis_section_id):
"""
Create a canvas section in the given course id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.create
"""
url = COURSES_API.format(course_id) + "/sections"
body = {"course_section": {"name": name,
"sis_section_id": sis_section_id}}
return CanvasSection(data=self._post_resource(url, body))
|
def create_section(self, course_id, name, sis_section_id):
"""
Create a canvas section in the given course id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.create
"""
url = COURSES_API.format(course_id) + "/sections"
body = {"course_section": {"name": name,
"sis_section_id": sis_section_id}}
return CanvasSection(data=self._post_resource(url, body))
|
[
"Create",
"a",
"canvas",
"section",
"in",
"the",
"given",
"course",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sections.py#L65-L75
|
[
"def",
"create_section",
"(",
"self",
",",
"course_id",
",",
"name",
",",
"sis_section_id",
")",
":",
"url",
"=",
"COURSES_API",
".",
"format",
"(",
"course_id",
")",
"+",
"\"/sections\"",
"body",
"=",
"{",
"\"course_section\"",
":",
"{",
"\"name\"",
":",
"name",
",",
"\"sis_section_id\"",
":",
"sis_section_id",
"}",
"}",
"return",
"CanvasSection",
"(",
"data",
"=",
"self",
".",
"_post_resource",
"(",
"url",
",",
"body",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
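For orientation, a usage sketch of `create_section` from the record above; the course id, section name, and SIS id are made-up values, and the client construction is assumed:

```python
from uw_canvas.sections import Sections  # module path from the record's `path` cell

client = Sections()  # hypothetical construction
# POSTs to /courses/{course_id}/sections and wraps the response in CanvasSection.
section = client.create_section(
    course_id=12345,                          # assumed Canvas course id
    name="PHYS 121 A",                        # assumed section name
    sis_section_id="2013-autumn-PHYS-121-A")  # assumed SIS identifier
```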
test
|
Sections.update_section
|
Update a canvas section with the given section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.update
|
uw_canvas/sections.py
|
def update_section(self, section_id, name, sis_section_id):
"""
Update a canvas section with the given section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.update
"""
url = SECTIONS_API.format(section_id)
body = {"course_section": {}}
if name:
body["course_section"]["name"] = name
if sis_section_id:
body["course_section"]["sis_section_id"] = sis_section_id
return CanvasSection(data=self._put_resource(url, body))
|
def update_section(self, section_id, name, sis_section_id):
"""
Update a canvas section with the given section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.update
"""
url = SECTIONS_API.format(section_id)
body = {"course_section": {}}
if name:
body["course_section"]["name"] = name
if sis_section_id:
body["course_section"]["sis_section_id"] = sis_section_id
return CanvasSection(data=self._put_resource(url, body))
|
[
"Update",
"a",
"canvas",
"section",
"with",
"the",
"given",
"section",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sections.py#L77-L92
|
[
"def",
"update_section",
"(",
"self",
",",
"section_id",
",",
"name",
",",
"sis_section_id",
")",
":",
"url",
"=",
"SECTIONS_API",
".",
"format",
"(",
"section_id",
")",
"body",
"=",
"{",
"\"course_section\"",
":",
"{",
"}",
"}",
"if",
"name",
":",
"body",
"[",
"\"course_section\"",
"]",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"sis_section_id",
":",
"body",
"[",
"\"course_section\"",
"]",
"[",
"\"sis_section_id\"",
"]",
"=",
"sis_section_id",
"return",
"CanvasSection",
"(",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"body",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
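`update_section` only sends fields that are truthy, so passing `None` (or an empty string) leaves that field unchanged server-side; an empty string cannot clear a name through this method. A partial-update sketch with assumed ids:

```python
from uw_canvas.sections import Sections  # module path from the record's `path` cell

client = Sections()  # hypothetical construction
# Rename the section but keep its SIS id: sis_section_id=None fails the
# truthiness check in the method body, so only "name" lands in the PUT body.
section = client.update_section(section_id=67890, name="PHYS 121 B",
                                sis_section_id=None)
```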
test
|
Quizzes.get_quizzes
|
List quizzes for a given course
https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes_api.index
|
uw_canvas/quizzes.py
|
def get_quizzes(self, course_id):
"""
List quizzes for a given course
https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes_api.index
"""
url = QUIZZES_API.format(course_id)
data = self._get_resource(url)
quizzes = []
for datum in data:
quizzes.append(Quiz(data=datum))
return quizzes
|
def get_quizzes(self, course_id):
"""
List quizzes for a given course
https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes_api.index
"""
url = QUIZZES_API.format(course_id)
data = self._get_resource(url)
quizzes = []
for datum in data:
quizzes.append(Quiz(data=datum))
return quizzes
|
[
"List",
"quizzes",
"for",
"a",
"given",
"course"
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/quizzes.py#L17-L28
|
[
"def",
"get_quizzes",
"(",
"self",
",",
"course_id",
")",
":",
"url",
"=",
"QUIZZES_API",
".",
"format",
"(",
"course_id",
")",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
"quizzes",
"=",
"[",
"]",
"for",
"datum",
"in",
"data",
":",
"quizzes",
".",
"append",
"(",
"Quiz",
"(",
"data",
"=",
"datum",
")",
")",
"return",
"quizzes"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Accounts.get_account
|
Return account resource for given canvas account id.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show
|
uw_canvas/accounts.py
|
def get_account(self, account_id):
"""
Return account resource for given canvas account id.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show
"""
url = ACCOUNTS_API.format(account_id)
return CanvasAccount(data=self._get_resource(url))
|
def get_account(self, account_id):
"""
Return account resource for given canvas account id.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show
"""
url = ACCOUNTS_API.format(account_id)
return CanvasAccount(data=self._get_resource(url))
|
[
"Return",
"account",
"resource",
"for",
"given",
"canvas",
"account",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/accounts.py#L8-L15
|
[
"def",
"get_account",
"(",
"self",
",",
"account_id",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"return",
"CanvasAccount",
"(",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Accounts.get_sub_accounts
|
Return list of subaccounts within the account with the passed
canvas id.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.sub_accounts
|
uw_canvas/accounts.py
|
def get_sub_accounts(self, account_id, params={}):
"""
Return list of subaccounts within the account with the passed
canvas id.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.sub_accounts
"""
url = ACCOUNTS_API.format(account_id) + "/sub_accounts"
accounts = []
for datum in self._get_paged_resource(url, params=params):
accounts.append(CanvasAccount(data=datum))
return accounts
|
def get_sub_accounts(self, account_id, params={}):
"""
Return list of subaccounts within the account with the passed
canvas id.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.sub_accounts
"""
url = ACCOUNTS_API.format(account_id) + "/sub_accounts"
accounts = []
for datum in self._get_paged_resource(url, params=params):
accounts.append(CanvasAccount(data=datum))
return accounts
|
[
"Return",
"list",
"of",
"subaccounts",
"within",
"the",
"account",
"with",
"the",
"passed",
"canvas",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/accounts.py#L23-L36
|
[
"def",
"get_sub_accounts",
"(",
"self",
",",
"account_id",
",",
"params",
"=",
"{",
"}",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"+",
"\"/sub_accounts\"",
"accounts",
"=",
"[",
"]",
"for",
"datum",
"in",
"self",
".",
"_get_paged_resource",
"(",
"url",
",",
"params",
"=",
"params",
")",
":",
"accounts",
".",
"append",
"(",
"CanvasAccount",
"(",
"data",
"=",
"datum",
")",
")",
"return",
"accounts"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Accounts.update_account
|
Update the passed account. Returns the updated account.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
|
uw_canvas/accounts.py
|
def update_account(self, account):
"""
Update the passed account. Returns the updated account.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
"""
url = ACCOUNTS_API.format(account.account_id)
body = {"account": {"name": account.name}}
return CanvasAccount(data=self._put_resource(url, body))
|
def update_account(self, account):
"""
Update the passed account. Returns the updated account.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
"""
url = ACCOUNTS_API.format(account.account_id)
body = {"account": {"name": account.name}}
return CanvasAccount(data=self._put_resource(url, body))
|
[
"Update",
"the",
"passed",
"account",
".",
"Returns",
"the",
"updated",
"account",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/accounts.py#L60-L69
|
[
"def",
"update_account",
"(",
"self",
",",
"account",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account",
".",
"account_id",
")",
"body",
"=",
"{",
"\"account\"",
":",
"{",
"\"name\"",
":",
"account",
".",
"name",
"}",
"}",
"return",
"CanvasAccount",
"(",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"body",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Accounts.update_sis_id
|
Updates the SIS ID for the account identified by the passed account ID.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
|
uw_canvas/accounts.py
|
def update_sis_id(self, account_id, sis_account_id):
"""
Updates the SIS ID for the account identified by the passed account ID.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
"""
if account_id == self._canvas_account_id:
raise Exception("SIS ID cannot be updated for the root account")
url = ACCOUNTS_API.format(account_id)
body = {"account": {"sis_account_id": sis_account_id}}
return CanvasAccount(data=self._put_resource(url, body))
|
def update_sis_id(self, account_id, sis_account_id):
"""
Updates the SIS ID for the account identified by the passed account ID.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
"""
if account_id == self._canvas_account_id:
raise Exception("SIS ID cannot be updated for the root account")
url = ACCOUNTS_API.format(account_id)
body = {"account": {"sis_account_id": sis_account_id}}
return CanvasAccount(data=self._put_resource(url, body))
|
[
"Updates",
"the",
"SIS",
"ID",
"for",
"the",
"account",
"identified",
"by",
"the",
"passed",
"account",
"ID",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/accounts.py#L71-L83
|
[
"def",
"update_sis_id",
"(",
"self",
",",
"account_id",
",",
"sis_account_id",
")",
":",
"if",
"account_id",
"==",
"self",
".",
"_canvas_account_id",
":",
"raise",
"Exception",
"(",
"\"SIS ID cannot be updated for the root account\"",
")",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"body",
"=",
"{",
"\"account\"",
":",
"{",
"\"sis_account_id\"",
":",
"sis_account_id",
"}",
"}",
"return",
"CanvasAccount",
"(",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"body",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
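`update_sis_id` guards the root account with a bare `Exception`, so callers that might be handed the root account id can only catch the failure broadly. A sketch with assumed ids:

```python
from uw_canvas.accounts import Accounts  # module path from the record's `path` cell

client = Accounts()  # hypothetical construction
try:
    account = client.update_sis_id(account_id=54321,                   # assumed id
                                   sis_account_id="uwcourse:seattle")  # assumed SIS id
except Exception as err:
    # The method raises a plain Exception for the root account, so nothing
    # narrower can be caught here.
    print("SIS ID update rejected:", err)
```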
test
|
Accounts.get_auth_settings
|
Return the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
|
uw_canvas/accounts.py
|
def get_auth_settings(self, account_id):
"""
Return the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
"""
url = ACCOUNTS_API.format(account_id) + "/sso_settings"
return CanvasSSOSettings(data=self._get_resource(url))
|
def get_auth_settings(self, account_id):
"""
Return the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
"""
url = ACCOUNTS_API.format(account_id) + "/sso_settings"
return CanvasSSOSettings(data=self._get_resource(url))
|
[
"Return",
"the",
"authentication",
"settings",
"for",
"the",
"passed",
"account_id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/accounts.py#L85-L92
|
[
"def",
"get_auth_settings",
"(",
"self",
",",
"account_id",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"+",
"\"/sso_settings\"",
"return",
"CanvasSSOSettings",
"(",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Accounts.update_auth_settings
|
Update the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.update_sso_settings
|
uw_canvas/accounts.py
|
def update_auth_settings(self, account_id, auth_settings):
"""
Update the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.update_sso_settings
"""
url = ACCOUNTS_API.format(account_id) + "/sso_settings"
data = self._put_resource(url, auth_settings.json_data())
return CanvasSSOSettings(data=data)
|
def update_auth_settings(self, account_id, auth_settings):
"""
Update the authentication settings for the passed account_id.
https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.update_sso_settings
"""
url = ACCOUNTS_API.format(account_id) + "/sso_settings"
data = self._put_resource(url, auth_settings.json_data())
return CanvasSSOSettings(data=data)
|
[
"Update",
"the",
"authentication",
"settings",
"for",
"the",
"passed",
"account_id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/accounts.py#L94-L102
|
[
"def",
"update_auth_settings",
"(",
"self",
",",
"account_id",
",",
"auth_settings",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"+",
"\"/sso_settings\"",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"auth_settings",
".",
"json_data",
"(",
")",
")",
"return",
"CanvasSSOSettings",
"(",
"data",
"=",
"data",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
settlement_schmertmann
|
Calculates the settlement of a shallow foundation (Schmertmann, 19XX).
:param sp: Soil Profile object
:param fd: Foundation object
:param load:
:param youngs_modulus_soil: The Young's modulus of the soil.
:param kwargs:
:return: float, the settlement.
|
geofound/settlement.py
|
def settlement_schmertmann(sp, fd, load, youngs_modulus_soil, **kwargs):
"""
Calculates the settlement of a shallow foundation (Schmertmann, 19XX).
:param sp: Soil Profile object
:param fd: Foundation object
:param load:
:param youngs_modulus_soil: The Young's modulus of the soil.
:param kwargs:
:return: float, the settlement.
"""
length = float(fd.length)
breadth = float(fd.width)
depth = float(fd.depth)
load = float(load)
sp.gwl = kwargs.get("gwl", sp.gwl)
sp.unit_sat_weight = kwargs.get("unit_sat_weight", sp.unit_sat_weight)
verbose = kwargs.get("verbose", 0)
years = kwargs.get("years", 0)
q = load / (length * breadth)
sigma_v0_eff = (sp.unit_dry_weight * min(depth, sp.gwl) +
(sp.unit_sat_weight - 9.8) * max([0, depth - sp.gwl]))
delta_q = q - sigma_v0_eff
# EMBEDMENT FACTOR
c_1 = max(1 - 0.5 * (sigma_v0_eff / delta_q), 0.5)
# CREEP FACTOR
if years == 0:
c_2 = 1.0
else:
c_2 = 1.0 + 0.2 * np.log10(years / 0.1)
# SHAPE FACTOR
long = max(length, breadth)
short = min(length, breadth)
c_3 = max(1.03 - 0.03 * (long / short), 0.73)
# Peak settlement index
if long / short > 10:
zp = short + depth
z_top = 0.2
z_bottom = 4 * short + depth
else:
z_top = 0.1
zp = 0.5 * short + depth
z_bottom = 2 * short + depth
sigma_vp_eff = (sp.unit_dry_weight * min(zp, sp.gwl) +
(sp.unit_sat_weight - 9.8) * max([0, zp - sp.gwl]))
i_zp = 0.5 + 0.1 * (delta_q / sigma_vp_eff) ** 0.5
i_z_top = (i_zp + z_top) / 2
i_z_bottom = i_zp / 2
settlement = (c_1 * c_2 * c_3 * delta_q *
(i_z_top * (zp - depth) + i_z_bottom * (z_bottom - zp)) / youngs_modulus_soil)
if verbose:
log("delta_q:", delta_q)
log("c_1:", c_1)
log("c_2:", c_2)
log("c_3:", c_3)
log("zp:", zp)
log("sigma_vp_eff:", sigma_vp_eff)
log("i_zp:", i_zp)
log("i_z_top:", i_z_top)
log("i_z_bottom:", i_z_bottom)
log("settlement:", settlement)
return settlement
|
def settlement_schmertmann(sp, fd, load, youngs_modulus_soil, **kwargs):
"""
Calculates the settlement of a shallow foundation (Schmertmann, 19XX).
:param sp: Soil Profile object
:param fd: Foundation object
:param load:
:param youngs_modulus_soil: The Young's modulus of the soil.
:param kwargs:
:return: float, the settlement.
"""
length = float(fd.length)
breadth = float(fd.width)
depth = float(fd.depth)
load = float(load)
sp.gwl = kwargs.get("gwl", sp.gwl)
sp.unit_sat_weight = kwargs.get("unit_sat_weight", sp.unit_sat_weight)
verbose = kwargs.get("verbose", 0)
years = kwargs.get("years", 0)
q = load / (length * breadth)
sigma_v0_eff = (sp.unit_dry_weight * min(depth, sp.gwl) +
(sp.unit_sat_weight - 9.8) * max([0, depth - sp.gwl]))
delta_q = q - sigma_v0_eff
# EMBEDMENT FACTOR
c_1 = max(1 - 0.5 * (sigma_v0_eff / delta_q), 0.5)
# CREEP FACTOR
if years == 0:
c_2 = 1.0
else:
c_2 = 1.0 + 0.2 * np.log10(years / 0.1)
# SHAPE FACTOR
long = max(length, breadth)
short = min(length, breadth)
c_3 = max(1.03 - 0.03 * (long / short), 0.73)
# Peak settlement index
if long / short > 10:
zp = short + depth
z_top = 0.2
z_bottom = 4 * short + depth
else:
z_top = 0.1
zp = 0.5 * short + depth
z_bottom = 2 * short + depth
sigma_vp_eff = (sp.unit_dry_weight * min(zp, sp.gwl) +
(sp.unit_sat_weight - 9.8) * max([0, zp - sp.gwl]))
i_zp = 0.5 + 0.1 * (delta_q / sigma_vp_eff) ** 0.5
i_z_top = (i_zp + z_top) / 2
i_z_bottom = i_zp / 2
settlement = (c_1 * c_2 * c_3 * delta_q *
(i_z_top * (zp - depth) + i_z_bottom * (z_bottom - zp)) / youngs_modulus_soil)
if verbose:
log("delta_q:", delta_q)
log("c_1:", c_1)
log("c_2:", c_2)
log("c_3:", c_3)
log("zp:", zp)
log("sigma_vp_eff:", sigma_vp_eff)
log("i_zp:", i_zp)
log("i_z_top:", i_z_top)
log("i_z_bottom:", i_z_bottom)
log("settlement:", settlement)
return settlement
|
[
"Calculates",
"the",
"settlement",
"of",
"a",
"shallow",
"foundation",
"(",
"Schmertmann",
"19XX",
")",
"."
] |
eng-tools/geofound
|
python
|
https://github.com/eng-tools/geofound/blob/6b1b097d5db998907bdcb5b4798fb4629674c770/geofound/settlement.py#L6-L74
|
[
"def",
"settlement_schmertmann",
"(",
"sp",
",",
"fd",
",",
"load",
",",
"youngs_modulus_soil",
",",
"*",
"*",
"kwargs",
")",
":",
"length",
"=",
"float",
"(",
"fd",
".",
"length",
")",
"breadth",
"=",
"float",
"(",
"fd",
".",
"width",
")",
"depth",
"=",
"float",
"(",
"fd",
".",
"depth",
")",
"load",
"=",
"float",
"(",
"load",
")",
"sp",
".",
"gwl",
"=",
"kwargs",
".",
"get",
"(",
"\"gwl\"",
",",
"sp",
".",
"gwl",
")",
"sp",
".",
"unit_sat_weight",
"=",
"kwargs",
".",
"get",
"(",
"\"unit_sat_weight\"",
",",
"sp",
".",
"unit_sat_weight",
")",
"verbose",
"=",
"kwargs",
".",
"get",
"(",
"\"verbose\"",
",",
"0",
")",
"years",
"=",
"kwargs",
".",
"get",
"(",
"\"years\"",
",",
"0",
")",
"q",
"=",
"load",
"/",
"(",
"length",
"*",
"breadth",
")",
"sigma_v0_eff",
"=",
"(",
"sp",
".",
"unit_dry_weight",
"*",
"min",
"(",
"depth",
",",
"sp",
".",
"gwl",
")",
"+",
"(",
"sp",
".",
"unit_sat_weight",
"-",
"9.8",
")",
"*",
"max",
"(",
"[",
"0",
",",
"depth",
"-",
"sp",
".",
"gwl",
"]",
")",
")",
"delta_q",
"=",
"q",
"-",
"sigma_v0_eff",
"# EMBEDMENT FACTOR",
"c_1",
"=",
"max",
"(",
"1",
"-",
"0.5",
"*",
"(",
"sigma_v0_eff",
"/",
"delta_q",
")",
",",
"0.5",
")",
"# CREEP FACTOR",
"if",
"years",
"==",
"0",
":",
"c_2",
"=",
"1.0",
"else",
":",
"c_2",
"=",
"1.0",
"+",
"0.2",
"*",
"np",
".",
"log10",
"(",
"years",
"/",
"0.1",
")",
"# SHAPE FACTOR",
"long",
"=",
"max",
"(",
"length",
",",
"breadth",
")",
"short",
"=",
"min",
"(",
"length",
",",
"breadth",
")",
"c_3",
"=",
"max",
"(",
"1.03",
"-",
"0.03",
"*",
"(",
"long",
"/",
"short",
")",
",",
"0.73",
")",
"# Peak settlement index",
"if",
"long",
"/",
"short",
">",
"10",
":",
"zp",
"=",
"short",
"+",
"depth",
"z_top",
"=",
"0.2",
"z_bottom",
"=",
"4",
"*",
"short",
"+",
"depth",
"else",
":",
"z_top",
"=",
"0.1",
"zp",
"=",
"0.5",
"*",
"short",
"+",
"depth",
"z_bottom",
"=",
"2",
"*",
"short",
"+",
"depth",
"sigma_vp_eff",
"=",
"(",
"sp",
".",
"unit_dry_weight",
"*",
"min",
"(",
"zp",
",",
"sp",
".",
"gwl",
")",
"+",
"(",
"sp",
".",
"unit_sat_weight",
"-",
"9.8",
")",
"*",
"max",
"(",
"[",
"0",
",",
"zp",
"-",
"sp",
".",
"gwl",
"]",
")",
")",
"i_zp",
"=",
"0.5",
"+",
"0.1",
"*",
"(",
"delta_q",
"/",
"sigma_vp_eff",
")",
"**",
"0.5",
"i_z_top",
"=",
"(",
"i_zp",
"+",
"z_top",
")",
"/",
"2",
"i_z_bottom",
"=",
"i_zp",
"/",
"2",
"settlement",
"=",
"(",
"c_1",
"*",
"c_2",
"*",
"c_3",
"*",
"delta_q",
"*",
"(",
"i_z_top",
"*",
"(",
"zp",
"-",
"depth",
")",
"+",
"i_z_bottom",
"*",
"(",
"z_bottom",
"-",
"zp",
")",
")",
"/",
"youngs_modulus_soil",
")",
"if",
"verbose",
":",
"log",
"(",
"\"delta_q:\"",
",",
"delta_q",
")",
"log",
"(",
"\"c_1:\"",
",",
"c_1",
")",
"log",
"(",
"\"c_2:\"",
",",
"c_2",
")",
"log",
"(",
"\"c_3:\"",
",",
"c_3",
")",
"log",
"(",
"\"zp:\"",
",",
"zp",
")",
"log",
"(",
"\"sigma_vp_eff:\"",
",",
"sigma_vp_eff",
")",
"log",
"(",
"\"i_zp:\"",
",",
"i_zp",
")",
"log",
"(",
"\"i_z_top:\"",
",",
"i_z_top",
")",
"log",
"(",
"\"i_z_bottom:\"",
",",
"i_z_bottom",
")",
"log",
"(",
"\"settlement:\"",
",",
"settlement",
")",
"return",
"settlement"
] |
6b1b097d5db998907bdcb5b4798fb4629674c770
|
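`settlement_schmertmann` only reads a handful of attributes from its `sp` and `fd` arguments, so it can be exercised with simple stand-ins. A minimal sketch; the attribute names come from the function body, while the numeric values and kN/kPa units are assumptions:

```python
from types import SimpleNamespace

from geofound.settlement import settlement_schmertmann  # path from the record's `path` cell

# Stand-ins exposing only the attributes the function body reads.
sp = SimpleNamespace(gwl=2.0,               # groundwater level depth (m), assumed
                     unit_dry_weight=17.0,  # kN/m^3, assumed
                     unit_sat_weight=19.0)  # kN/m^3, assumed
fd = SimpleNamespace(length=3.0, width=2.0, depth=1.0)  # footing geometry (m), assumed

s = settlement_schmertmann(sp, fd,
                           load=1800.0,                  # kN, assumed
                           youngs_modulus_soil=25000.0,  # kPa, assumed
                           years=10)                     # engages the creep factor c_2
print("settlement:", s)
```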
test
|
Terms.get_all_terms
|
Return all of the terms in the account.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
|
uw_canvas/terms.py
|
def get_all_terms(self):
"""
Return all of the terms in the account.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
"""
if not self._canvas_account_id:
raise MissingAccountID()
params = {"workflow_state": 'all', 'per_page': 500}
url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms"
data_key = 'enrollment_terms'
terms = []
response = self._get_paged_resource(url, params, data_key)
for data in response[data_key]:
terms.append(CanvasTerm(data=data))
return terms
|
def get_all_terms(self):
"""
Return all of the terms in the account.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
"""
if not self._canvas_account_id:
raise MissingAccountID()
params = {"workflow_state": 'all', 'per_page': 500}
url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms"
data_key = 'enrollment_terms'
terms = []
response = self._get_paged_resource(url, params, data_key)
for data in response[data_key]:
terms.append(CanvasTerm(data=data))
return terms
|
[
"Return",
"all",
"of",
"the",
"terms",
"in",
"the",
"account",
".",
"https",
":",
"//",
"canvas",
".",
"instructure",
".",
"com",
"/",
"doc",
"/",
"api",
"/",
"enrollment_terms",
".",
"html#method",
".",
"terms_api",
".",
"index"
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/terms.py#L7-L23
|
[
"def",
"get_all_terms",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_canvas_account_id",
":",
"raise",
"MissingAccountID",
"(",
")",
"params",
"=",
"{",
"\"workflow_state\"",
":",
"'all'",
",",
"'per_page'",
":",
"500",
"}",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"self",
".",
"_canvas_account_id",
")",
"+",
"\"/terms\"",
"data_key",
"=",
"'enrollment_terms'",
"terms",
"=",
"[",
"]",
"response",
"=",
"self",
".",
"_get_paged_resource",
"(",
"url",
",",
"params",
",",
"data_key",
")",
"for",
"data",
"in",
"response",
"[",
"data_key",
"]",
":",
"terms",
".",
"append",
"(",
"CanvasTerm",
"(",
"data",
"=",
"data",
")",
")",
"return",
"terms"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Terms.get_term_by_sis_id
|
Return a term resource for the passed SIS ID.
|
uw_canvas/terms.py
|
def get_term_by_sis_id(self, sis_term_id):
"""
Return a term resource for the passed SIS ID.
"""
for term in self.get_all_terms():
if term.sis_term_id == sis_term_id:
return term
|
def get_term_by_sis_id(self, sis_term_id):
"""
Return a term resource for the passed SIS ID.
"""
for term in self.get_all_terms():
if term.sis_term_id == sis_term_id:
return term
|
[
"Return",
"a",
"term",
"resource",
"for",
"the",
"passed",
"SIS",
"ID",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/terms.py#L25-L31
|
[
"def",
"get_term_by_sis_id",
"(",
"self",
",",
"sis_term_id",
")",
":",
"for",
"term",
"in",
"self",
".",
"get_all_terms",
"(",
")",
":",
"if",
"term",
".",
"sis_term_id",
"==",
"sis_term_id",
":",
"return",
"term"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
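`get_term_by_sis_id` falls off the end of its loop on a miss, so it returns `None` implicitly rather than raising. A sketch with an assumed SIS term id:

```python
from uw_canvas.terms import Terms  # module path from the record's `path` cell

client = Terms()  # hypothetical construction
term = client.get_term_by_sis_id("2013-summer")  # assumed SIS term id
if term is None:
    # No enrollment term matched; the method returns None implicitly.
    print("term not found")
```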
test
|
Terms.update_term_overrides
|
Update an existing enrollment term for the passed SIS ID.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms.update
|
uw_canvas/terms.py
|
def update_term_overrides(self, sis_term_id, overrides={}):
"""
Update an existing enrollment term for the passed SIS ID.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms.update
"""
if not self._canvas_account_id:
raise MissingAccountID()
url = ACCOUNTS_API.format(
self._canvas_account_id) + "/terms/{}".format(
self._sis_id(sis_term_id, sis_field='term'))
body = {'enrollment_term': {'overrides': overrides}}
return CanvasTerm(data=self._put_resource(url, body))
|
def update_term_overrides(self, sis_term_id, overrides={}):
"""
Update an existing enrollment term for the passed SIS ID.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms.update
"""
if not self._canvas_account_id:
raise MissingAccountID()
url = ACCOUNTS_API.format(
self._canvas_account_id) + "/terms/{}".format(
self._sis_id(sis_term_id, sis_field='term'))
body = {'enrollment_term': {'overrides': overrides}}
return CanvasTerm(data=self._put_resource(url, body))
|
[
"Update",
"an",
"existing",
"enrollment",
"term",
"for",
"the",
"passed",
"SIS",
"ID",
".",
"https",
":",
"//",
"canvas",
".",
"instructure",
".",
"com",
"/",
"doc",
"/",
"api",
"/",
"enrollment_terms",
".",
"html#method",
".",
"terms",
".",
"update"
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/terms.py#L33-L46
|
[
"def",
"update_term_overrides",
"(",
"self",
",",
"sis_term_id",
",",
"overrides",
"=",
"{",
"}",
")",
":",
"if",
"not",
"self",
".",
"_canvas_account_id",
":",
"raise",
"MissingAccountID",
"(",
")",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"self",
".",
"_canvas_account_id",
")",
"+",
"\"/terms/{}\"",
".",
"format",
"(",
"self",
".",
"_sis_id",
"(",
"sis_term_id",
",",
"sis_field",
"=",
"'term'",
")",
")",
"body",
"=",
"{",
"'enrollment_term'",
":",
"{",
"'overrides'",
":",
"overrides",
"}",
"}",
"return",
"CanvasTerm",
"(",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"body",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
log
|
Produces console output.
:param out_str: Output string
:param o2: Additional output string
:param o3: Additional output string
:param o4: Additional output string
:return: None
|
geofound/output.py
|
def log(out_str, o2="", o3="", o4=""):
"""
Produces console output.
:param out_str: Output string
:param o2: Additional output string
:param o3: Additional output string
:param o4: Additional output string
:return: None
"""
print(out_str, o2, o3, o4)
|
def log(out_str, o2="", o3="", o4=""):
"""
Produces console output.
:param out_str: Output string
:param o2: Additional output string
:param o3: Additional output string
:param o4: Additional output string
:return: None
"""
print(out_str, o2, o3, o4)
|
[
"Produces",
"console",
"output",
".",
":",
"param",
"out_str",
":",
"Output",
"string",
":",
"param",
"o2",
":",
"Additional",
"output",
"string",
":",
"param",
"o3",
":",
"Additional",
"output",
"string",
":",
"param",
"o4",
":",
"Additional",
"output",
"string",
":",
"return",
":",
"None"
] |
eng-tools/geofound
|
python
|
https://github.com/eng-tools/geofound/blob/6b1b097d5db998907bdcb5b4798fb4629674c770/geofound/output.py#L3-L12
|
[
"def",
"log",
"(",
"out_str",
",",
"o2",
"=",
"\"\"",
",",
"o3",
"=",
"\"\"",
",",
"o4",
"=",
"\"\"",
")",
":",
"print",
"(",
"out_str",
",",
"o2",
",",
"o3",
",",
"o4",
")"
] |
6b1b097d5db998907bdcb5b4798fb4629674c770
|
test
|
SISImport.import_str
|
Imports a CSV string.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
|
uw_canvas/sis_import.py
|
def import_str(self, csv, params={}):
"""
Imports a CSV string.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
"""
if not self._canvas_account_id:
raise MissingAccountID()
params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
url = SIS_IMPORTS_API.format(
self._canvas_account_id) + ".json{}".format(self._params(params))
headers = {"Content-Type": "text/csv"}
return SISImportModel(data=self._post_resource(url, headers, csv))
|
def import_str(self, csv, params={}):
"""
Imports a CSV string.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
"""
if not self._canvas_account_id:
raise MissingAccountID()
params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
url = SIS_IMPORTS_API.format(
self._canvas_account_id) + ".json{}".format(self._params(params))
headers = {"Content-Type": "text/csv"}
return SISImportModel(data=self._post_resource(url, headers, csv))
|
[
"Imports",
"a",
"CSV",
"string",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sis_import.py#L18-L32
|
[
"def",
"import_str",
"(",
"self",
",",
"csv",
",",
"params",
"=",
"{",
"}",
")",
":",
"if",
"not",
"self",
".",
"_canvas_account_id",
":",
"raise",
"MissingAccountID",
"(",
")",
"params",
"[",
"\"import_type\"",
"]",
"=",
"SISImportModel",
".",
"CSV_IMPORT_TYPE",
"url",
"=",
"SIS_IMPORTS_API",
".",
"format",
"(",
"self",
".",
"_canvas_account_id",
")",
"+",
"\".json{}\"",
".",
"format",
"(",
"self",
".",
"_params",
"(",
"params",
")",
")",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"text/csv\"",
"}",
"return",
"SISImportModel",
"(",
"data",
"=",
"self",
".",
"_post_resource",
"(",
"url",
",",
"headers",
",",
"csv",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
SISImport.import_dir
|
Imports a directory of CSV files.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
|
uw_canvas/sis_import.py
|
def import_dir(self, dir_path, params={}):
"""
Imports a directory of CSV files.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
"""
if not self._canvas_account_id:
raise MissingAccountID()
body = self._build_archive(dir_path)
params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
url = SIS_IMPORTS_API.format(
self._canvas_account_id) + ".json{}".format(self._params(params))
headers = {"Content-Type": "application/zip"}
return SISImportModel(data=self._post_resource(url, headers, body))
|
def import_dir(self, dir_path, params={}):
"""
Imports a directory of CSV files.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
"""
if not self._canvas_account_id:
raise MissingAccountID()
body = self._build_archive(dir_path)
params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
url = SIS_IMPORTS_API.format(
self._canvas_account_id) + ".json{}".format(self._params(params))
headers = {"Content-Type": "application/zip"}
return SISImportModel(data=self._post_resource(url, headers, body))
|
[
"Imports",
"a",
"directory",
"of",
"CSV",
"files",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sis_import.py#L34-L49
|
[
"def",
"import_dir",
"(",
"self",
",",
"dir_path",
",",
"params",
"=",
"{",
"}",
")",
":",
"if",
"not",
"self",
".",
"_canvas_account_id",
":",
"raise",
"MissingAccountID",
"(",
")",
"body",
"=",
"self",
".",
"_build_archive",
"(",
"dir_path",
")",
"params",
"[",
"\"import_type\"",
"]",
"=",
"SISImportModel",
".",
"CSV_IMPORT_TYPE",
"url",
"=",
"SIS_IMPORTS_API",
".",
"format",
"(",
"self",
".",
"_canvas_account_id",
")",
"+",
"\".json{}\"",
".",
"format",
"(",
"self",
".",
"_params",
"(",
"params",
")",
")",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/zip\"",
"}",
"return",
"SISImportModel",
"(",
"data",
"=",
"self",
".",
"_post_resource",
"(",
"url",
",",
"headers",
",",
"body",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
SISImport.get_import_status
|
Get the status of an already created SIS import.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show
|
uw_canvas/sis_import.py
|
def get_import_status(self, sis_import):
"""
Get the status of an already created SIS import.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show
"""
if not self._canvas_account_id:
raise MissingAccountID()
url = SIS_IMPORTS_API.format(
self._canvas_account_id) + "/{}.json".format(sis_import.import_id)
return SISImportModel(data=self._get_resource(url))
|
def get_import_status(self, sis_import):
"""
Get the status of an already created SIS import.
https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show
"""
if not self._canvas_account_id:
raise MissingAccountID()
url = SIS_IMPORTS_API.format(
self._canvas_account_id) + "/{}.json".format(sis_import.import_id)
return SISImportModel(data=self._get_resource(url))
|
[
"Get",
"the",
"status",
"of",
"an",
"already",
"created",
"SIS",
"import",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sis_import.py#L51-L63
|
[
"def",
"get_import_status",
"(",
"self",
",",
"sis_import",
")",
":",
"if",
"not",
"self",
".",
"_canvas_account_id",
":",
"raise",
"MissingAccountID",
"(",
")",
"url",
"=",
"SIS_IMPORTS_API",
".",
"format",
"(",
"self",
".",
"_canvas_account_id",
")",
"+",
"\"/{}.json\"",
".",
"format",
"(",
"sis_import",
".",
"import_id",
")",
"return",
"SISImportModel",
"(",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
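The SISImport records above describe a submit-then-poll workflow: `import_str` (or `import_dir`) creates the import and `get_import_status` re-fetches it. A bounded polling sketch; the construction and the `workflow_state` attribute name are assumptions, since the model's fields are not shown in this table:

```python
from time import sleep

from uw_canvas.sis_import import SISImport  # module path from the records' `path` cells

client = SISImport()  # hypothetical construction
csv_data = "course_id,short_name,long_name,account_id,status\n"  # assumed payload

sis_import = client.import_str(csv_data)
for _ in range(60):  # bounded poll, roughly five minutes
    sis_import = client.get_import_status(sis_import)
    # "workflow_state" is the status field in the Canvas SIS Imports API;
    # assuming the model exposes it under the same name.
    if getattr(sis_import, "workflow_state", "") in ("imported", "failed"):
        break
    sleep(5)
```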
test
|
SISImport._build_archive
|
Creates a zip archive from files in path.
|
uw_canvas/sis_import.py
|
def _build_archive(self, dir_path):
"""
Creates a zip archive from files in path.
"""
zip_path = os.path.join(dir_path, "import.zip")
archive = zipfile.ZipFile(zip_path, "w")
for filename in CSV_FILES:
filepath = os.path.join(dir_path, filename)
if os.path.exists(filepath):
archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
archive.close()
with open(zip_path, "rb") as f:
body = f.read()
return body
|
def _build_archive(self, dir_path):
"""
Creates a zip archive from files in path.
"""
zip_path = os.path.join(dir_path, "import.zip")
archive = zipfile.ZipFile(zip_path, "w")
for filename in CSV_FILES:
filepath = os.path.join(dir_path, filename)
if os.path.exists(filepath):
archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
archive.close()
with open(zip_path, "rb") as f:
body = f.read()
return body
|
[
"Creates",
"a",
"zip",
"archive",
"from",
"files",
"in",
"path",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sis_import.py#L75-L93
|
[
"def",
"_build_archive",
"(",
"self",
",",
"dir_path",
")",
":",
"zip_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"\"import.zip\"",
")",
"archive",
"=",
"zipfile",
".",
"ZipFile",
"(",
"zip_path",
",",
"\"w\"",
")",
"for",
"filename",
"in",
"CSV_FILES",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"archive",
".",
"write",
"(",
"filepath",
",",
"filename",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"archive",
".",
"close",
"(",
")",
"with",
"open",
"(",
"zip_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"body",
"=",
"f",
".",
"read",
"(",
")",
"return",
"body"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
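`_build_archive` works, but it opens the `ZipFile` without a context manager and passes `ZIP_DEFLATED` positionally as `compress_type`. A behavior-equivalent sketch using `with`, in case the helper is ever reused outside this client:

```python
import os
import zipfile

def build_archive(dir_path, csv_files):
    """Zip the named CSV files found under dir_path and return the bytes."""
    zip_path = os.path.join(dir_path, "import.zip")
    with zipfile.ZipFile(zip_path, "w") as archive:  # closed even on error
        for filename in csv_files:
            filepath = os.path.join(dir_path, filename)
            if os.path.exists(filepath):
                archive.write(filepath, filename,
                              compress_type=zipfile.ZIP_DEFLATED)
    with open(zip_path, "rb") as f:
        return f.read()
```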
test
|
Assignments.get_assignments
|
List assignments for a given course
https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index
|
uw_canvas/assignments.py
|
def get_assignments(self, course_id):
"""
List assignments for a given course
https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index
"""
url = ASSIGNMENTS_API.format(course_id)
data = self._get_resource(url)
assignments = []
for datum in data:
assignments.append(Assignment(data=datum))
return assignments
|
def get_assignments(self, course_id):
"""
List assignments for a given course
https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index
"""
url = ASSIGNMENTS_API.format(course_id)
data = self._get_resource(url)
assignments = []
for datum in data:
assignments.append(Assignment(data=datum))
return assignments
|
[
"List",
"assignments",
"for",
"a",
"given",
"course"
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/assignments.py#L9-L20
|
[
"def",
"get_assignments",
"(",
"self",
",",
"course_id",
")",
":",
"url",
"=",
"ASSIGNMENTS_API",
".",
"format",
"(",
"course_id",
")",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
"assignments",
"=",
"[",
"]",
"for",
"datum",
"in",
"data",
":",
"assignments",
".",
"append",
"(",
"Assignment",
"(",
"data",
"=",
"datum",
")",
")",
"return",
"assignments"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Assignments.update_assignment
|
Modify an existing assignment.
https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update
|
uw_canvas/assignments.py
|
def update_assignment(self, assignment):
"""
Modify an existing assignment.
https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update
"""
url = ASSIGNMENTS_API.format(assignment.course_id) + "/{}".format(
assignment.assignment_id)
data = self._put_resource(url, assignment.json_data())
return Assignment(data=data)
|
def update_assignment(self, assignment):
"""
Modify an existing assignment.
https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update
"""
url = ASSIGNMENTS_API.format(assignment.course_id) + "/{}".format(
assignment.assignment_id)
data = self._put_resource(url, assignment.json_data())
return Assignment(data=data)
|
[
"Modify",
"an",
"existing",
"assignment",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/assignments.py#L30-L40
|
[
"def",
"update_assignment",
"(",
"self",
",",
"assignment",
")",
":",
"url",
"=",
"ASSIGNMENTS_API",
".",
"format",
"(",
"assignment",
".",
"course_id",
")",
"+",
"\"/{}\"",
".",
"format",
"(",
"assignment",
".",
"assignment_id",
")",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"assignment",
".",
"json_data",
"(",
")",
")",
"return",
"Assignment",
"(",
"data",
"=",
"data",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.get_available_reports
|
Returns the list of reports for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.available_reports
|
uw_canvas/reports.py
|
def get_available_reports(self, account_id):
"""
Returns the list of reports for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.available_reports
"""
url = ACCOUNTS_API.format(account_id) + "/reports"
report_types = []
for datum in self._get_resource(url):
report_types.append(ReportType(data=datum, account_id=account_id))
return report_types
|
def get_available_reports(self, account_id):
"""
Returns the list of reports for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.available_reports
"""
url = ACCOUNTS_API.format(account_id) + "/reports"
report_types = []
for datum in self._get_resource(url):
report_types.append(ReportType(data=datum, account_id=account_id))
return report_types
|
[
"Returns",
"the",
"list",
"of",
"reports",
"for",
"the",
"canvas",
"account",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L23-L34
|
[
"def",
"get_available_reports",
"(",
"self",
",",
"account_id",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"+",
"\"/reports\"",
"report_types",
"=",
"[",
"]",
"for",
"datum",
"in",
"self",
".",
"_get_resource",
"(",
"url",
")",
":",
"report_types",
".",
"append",
"(",
"ReportType",
"(",
"data",
"=",
"datum",
",",
"account_id",
"=",
"account_id",
")",
")",
"return",
"report_types"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.get_reports_by_type
|
Shows all reports of the passed report_type that have been run
for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
|
uw_canvas/reports.py
|
def get_reports_by_type(self, account_id, report_type):
"""
Shows all reports of the passed report_type that have been run
for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
"""
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
reports = []
for datum in self._get_resource(url):
datum["account_id"] = account_id
reports.append(Report(data=datum))
return reports
|
def get_reports_by_type(self, account_id, report_type):
"""
Shows all reports of the passed report_type that have been run
for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
"""
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
reports = []
for datum in self._get_resource(url):
datum["account_id"] = account_id
reports.append(Report(data=datum))
return reports
|
[
"Shows",
"all",
"reports",
"of",
"the",
"passed",
"report_type",
"that",
"have",
"been",
"run",
"for",
"the",
"canvas",
"account",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L36-L51
|
[
"def",
"get_reports_by_type",
"(",
"self",
",",
"account_id",
",",
"report_type",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"+",
"\"/reports/{}\"",
".",
"format",
"(",
"report_type",
")",
"reports",
"=",
"[",
"]",
"for",
"datum",
"in",
"self",
".",
"_get_resource",
"(",
"url",
")",
":",
"datum",
"[",
"\"account_id\"",
"]",
"=",
"account_id",
"reports",
".",
"append",
"(",
"Report",
"(",
"data",
"=",
"datum",
")",
")",
"return",
"reports"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.create_report
|
Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
|
uw_canvas/reports.py
|
def create_report(self, report_type, account_id, term_id=None, params={}):
"""
Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
"""
if term_id is not None:
params["enrollment_term_id"] = term_id
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
body = {"parameters": params}
data = self._post_resource(url, body)
data["account_id"] = account_id
return Report(data=data)
|
def create_report(self, report_type, account_id, term_id=None, params={}):
"""
Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
"""
if term_id is not None:
params["enrollment_term_id"] = term_id
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
body = {"parameters": params}
data = self._post_resource(url, body)
data["account_id"] = account_id
return Report(data=data)
|
[
"Generates",
"a",
"report",
"instance",
"for",
"the",
"canvas",
"account",
"id",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L53-L68
|
[
"def",
"create_report",
"(",
"self",
",",
"report_type",
",",
"account_id",
",",
"term_id",
"=",
"None",
",",
"params",
"=",
"{",
"}",
")",
":",
"if",
"term_id",
"is",
"not",
"None",
":",
"params",
"[",
"\"enrollment_term_id\"",
"]",
"=",
"term_id",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"account_id",
")",
"+",
"\"/reports/{}\"",
".",
"format",
"(",
"report_type",
")",
"body",
"=",
"{",
"\"parameters\"",
":",
"params",
"}",
"data",
"=",
"self",
".",
"_post_resource",
"(",
"url",
",",
"body",
")",
"data",
"[",
"\"account_id\"",
"]",
"=",
"account_id",
"return",
"Report",
"(",
"data",
"=",
"data",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.create_course_provisioning_report
|
Convenience method for create_report, for creating a course
provisioning report.
|
uw_canvas/reports.py
|
def create_course_provisioning_report(self, account_id, term_id=None,
params={}):
"""
Convenience method for create_report, for creating a course
provisioning report.
"""
params["courses"] = True
return self.create_report(ReportType.PROVISIONING, account_id, term_id,
params)
|
def create_course_provisioning_report(self, account_id, term_id=None,
params={}):
"""
Convenience method for create_report, for creating a course
provisioning report.
"""
params["courses"] = True
return self.create_report(ReportType.PROVISIONING, account_id, term_id,
params)
|
[
"Convenience",
"method",
"for",
"create_report",
"for",
"creating",
"a",
"course",
"provisioning",
"report",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L70-L78
|
[
"def",
"create_course_provisioning_report",
"(",
"self",
",",
"account_id",
",",
"term_id",
"=",
"None",
",",
"params",
"=",
"{",
"}",
")",
":",
"params",
"[",
"\"courses\"",
"]",
"=",
"True",
"return",
"self",
".",
"create_report",
"(",
"ReportType",
".",
"PROVISIONING",
",",
"account_id",
",",
"term_id",
",",
"params",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.create_course_sis_export_report
|
Convenience method for create_report, for creating a course sis export
report.
|
uw_canvas/reports.py
|
def create_course_sis_export_report(self, account_id, term_id=None,
params={}):
"""
Convenience method for create_report, for creating a course sis export
report.
"""
params["courses"] = True
return self.create_report(ReportType.SIS_EXPORT, account_id, term_id,
params)
|
def create_course_sis_export_report(self, account_id, term_id=None,
params={}):
"""
Convenience method for create_report, for creating a course sis export
report.
"""
params["courses"] = True
return self.create_report(ReportType.SIS_EXPORT, account_id, term_id,
params)
|
[
"Convenience",
"method",
"for",
"create_report",
"for",
"creating",
"a",
"course",
"sis",
"export",
"report",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L110-L118
|
[
"def",
"create_course_sis_export_report",
"(",
"self",
",",
"account_id",
",",
"term_id",
"=",
"None",
",",
"params",
"=",
"{",
"}",
")",
":",
"params",
"[",
"\"courses\"",
"]",
"=",
"True",
"return",
"self",
".",
"create_report",
"(",
"ReportType",
".",
"SIS_EXPORT",
",",
"account_id",
",",
"term_id",
",",
"params",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.create_unused_courses_report
|
Convenience method for create_report, for creating an unused courses
report.
|
uw_canvas/reports.py
|
def create_unused_courses_report(self, account_id, term_id=None):
"""
Convenience method for create_report, for creating an unused courses
report.
"""
return self.create_report(ReportType.UNUSED_COURSES, account_id,
term_id)
|
def create_unused_courses_report(self, account_id, term_id=None):
"""
Convenience method for create_report, for creating an unused courses
report.
"""
return self.create_report(ReportType.UNUSED_COURSES, account_id,
term_id)
|
[
"Convenience",
"method",
"for",
"create_report",
"for",
"creating",
"an",
"unused",
"courses",
"report",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L120-L126
|
[
"def",
"create_unused_courses_report",
"(",
"self",
",",
"account_id",
",",
"term_id",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_report",
"(",
"ReportType",
".",
"UNUSED_COURSES",
",",
"account_id",
",",
"term_id",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.get_report_data
|
Returns a completed report as a list of csv strings.
|
uw_canvas/reports.py
|
def get_report_data(self, report):
"""
Returns a completed report as a list of csv strings.
"""
if report.report_id is None or report.status is None:
raise ReportFailureException(report)
interval = getattr(settings, 'CANVAS_REPORT_POLLING_INTERVAL', 5)
while report.status != "complete":
if report.status == "error":
raise ReportFailureException(report)
sleep(interval)
report = self.get_report_status(report)
if report.attachment is None or report.attachment.url is None:
return
data = self._get_report_file(report.attachment.url)
return data.split("\n")
|
def get_report_data(self, report):
"""
Returns a completed report as a list of csv strings.
"""
if report.report_id is None or report.status is None:
raise ReportFailureException(report)
interval = getattr(settings, 'CANVAS_REPORT_POLLING_INTERVAL', 5)
while report.status != "complete":
if report.status == "error":
raise ReportFailureException(report)
sleep(interval)
report = self.get_report_status(report)
if report.attachment is None or report.attachment.url is None:
return
data = self._get_report_file(report.attachment.url)
return data.split("\n")
|
[
"Returns",
"a",
"completed",
"report",
"as",
"a",
"list",
"of",
"csv",
"strings",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L128-L147
|
[
"def",
"get_report_data",
"(",
"self",
",",
"report",
")",
":",
"if",
"report",
".",
"report_id",
"is",
"None",
"or",
"report",
".",
"status",
"is",
"None",
":",
"raise",
"ReportFailureException",
"(",
"report",
")",
"interval",
"=",
"getattr",
"(",
"settings",
",",
"'CANVAS_REPORT_POLLING_INTERVAL'",
",",
"5",
")",
"while",
"report",
".",
"status",
"!=",
"\"complete\"",
":",
"if",
"report",
".",
"status",
"==",
"\"error\"",
":",
"raise",
"ReportFailureException",
"(",
"report",
")",
"sleep",
"(",
"interval",
")",
"report",
"=",
"self",
".",
"get_report_status",
"(",
"report",
")",
"if",
"report",
".",
"attachment",
"is",
"None",
"or",
"report",
".",
"attachment",
".",
"url",
"is",
"None",
":",
"return",
"data",
"=",
"self",
".",
"_get_report_file",
"(",
"report",
".",
"attachment",
".",
"url",
")",
"return",
"data",
".",
"split",
"(",
"\"\\n\"",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
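Putting the Reports records together: `create_report` (or a convenience wrapper) starts a run, `get_report_data` blocks on it, and `delete_report` removes the server-side artifact. A sketch with assumed ids and construction; note that `get_report_data` sleeps between polls (`CANVAS_REPORT_POLLING_INTERVAL`, default 5 seconds) and returns `None` when the finished report has no attachment:

```python
from uw_canvas.reports import Reports  # module path from the records' `path` cells

client = Reports()  # hypothetical construction
report = client.create_course_provisioning_report(account_id=54321,  # assumed id
                                                  term_id=78)        # assumed id
try:
    # Blocks, re-polling every CANVAS_REPORT_POLLING_INTERVAL seconds until the
    # report completes; raises ReportFailureException on an "error" status.
    rows = client.get_report_data(report)
finally:
    client.delete_report(report)  # clean up the generated report instance

if rows:  # None when the completed report carried no attachment
    header, *body = rows
```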
test
|
Reports.get_report_status
|
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
|
uw_canvas/reports.py
|
def get_report_status(self, report):
"""
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
"""
if (report.account_id is None or report.type is None or
report.report_id is None):
raise ReportFailureException(report)
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
data = self._get_resource(url)
data["account_id"] = report.account_id
return Report(data=data)
|
def get_report_status(self, report):
"""
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
"""
if (report.account_id is None or report.type is None or
report.report_id is None):
raise ReportFailureException(report)
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
data = self._get_resource(url)
data["account_id"] = report.account_id
return Report(data=data)
|
[
"Returns",
"the",
"status",
"of",
"a",
"report",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L149-L164
|
[
"def",
"get_report_status",
"(",
"self",
",",
"report",
")",
":",
"if",
"(",
"report",
".",
"account_id",
"is",
"None",
"or",
"report",
".",
"type",
"is",
"None",
"or",
"report",
".",
"report_id",
"is",
"None",
")",
":",
"raise",
"ReportFailureException",
"(",
"report",
")",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"report",
".",
"account_id",
")",
"+",
"\"/reports/{}/{}\"",
".",
"format",
"(",
"report",
".",
"type",
",",
"report",
".",
"report_id",
")",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
"data",
"[",
"\"account_id\"",
"]",
"=",
"report",
".",
"account_id",
"return",
"Report",
"(",
"data",
"=",
"data",
")"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
Reports.delete_report
|
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
|
uw_canvas/reports.py
|
def delete_report(self, report):
"""
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
"""
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
response = self._delete_resource(url)
return True
|
def delete_report(self, report):
"""
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
"""
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
response = self._delete_resource(url)
return True
|
[
"Deletes",
"a",
"generated",
"report",
"instance",
"."
] |
uw-it-aca/uw-restclients-canvas
|
python
|
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L166-L176
|
[
"def",
"delete_report",
"(",
"self",
",",
"report",
")",
":",
"url",
"=",
"ACCOUNTS_API",
".",
"format",
"(",
"report",
".",
"account_id",
")",
"+",
"\"/reports/{}/{}\"",
".",
"format",
"(",
"report",
".",
"type",
",",
"report",
".",
"report_id",
")",
"response",
"=",
"self",
".",
"_delete_resource",
"(",
"url",
")",
"return",
"True"
] |
9845faf33d49a8f06908efc22640c001116d6ea2
|
test
|
crop_image
|
Crop an image given the top left corner.
:param img: The image
:param start_y: The top left corner y coord
:param start_x: The top left corner x coord
:param h: The result height
:param w: The result width
:return: The cropped image.
|
opendatalake/detection/utils.py
|
def crop_image(img, start_y, start_x, h, w):
"""
Crop an image given the top left corner.
:param img: The image
:param start_y: The top left corner y coord
:param start_x: The top left corner x coord
:param h: The result height
:param w: The result width
:return: The cropped image.
"""
return img[start_y:start_y + h, start_x:start_x + w, :].copy()
|
def crop_image(img, start_y, start_x, h, w):
"""
Crop an image given the top left corner.
:param img: The image
:param start_y: The top left corner y coord
:param start_x: The top left corner x coord
:param h: The result height
:param w: The result width
:return: The cropped image.
"""
return img[start_y:start_y + h, start_x:start_x + w, :].copy()
|
[
"Crop",
"an",
"image",
"given",
"the",
"top",
"left",
"corner",
".",
":",
"param",
"img",
":",
"The",
"image",
":",
"param",
"start_y",
":",
"The",
"top",
"left",
"corner",
"y",
"coord",
":",
"param",
"start_x",
":",
"The",
"top",
"left",
"corner",
"x",
"coord",
":",
"param",
"h",
":",
"The",
"result",
"height",
":",
"param",
"w",
":",
"The",
"result",
"width",
":",
"return",
":",
"The",
"cropped",
"image",
"."
] |
penguinmenac3/opendatalake
|
python
|
https://github.com/penguinmenac3/opendatalake/blob/77c888377095e1812a16982c8efbd2f6b1697a33/opendatalake/detection/utils.py#L758-L768
|
[
"def",
"crop_image",
"(",
"img",
",",
"start_y",
",",
"start_x",
",",
"h",
",",
"w",
")",
":",
"return",
"img",
"[",
"start_y",
":",
"start_y",
"+",
"h",
",",
"start_x",
":",
"start_x",
"+",
"w",
",",
":",
"]",
".",
"copy",
"(",
")"
] |
77c888377095e1812a16982c8efbd2f6b1697a33
|
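crop_image is a plain NumPy slice plus .copy(); a quick self-contained check of its semantics (the dummy array is illustrative):

# Self-contained check of crop_image semantics on a dummy HWC array.
import numpy as np

def crop_image(img, start_y, start_x, h, w):
    return img[start_y:start_y + h, start_x:start_x + w, :].copy()

img = np.arange(5 * 6 * 3).reshape(5, 6, 3)
patch = crop_image(img, start_y=1, start_x=2, h=3, w=4)
assert patch.shape == (3, 4, 3)
patch[0, 0, 0] = -1   # .copy() means the source image stays untouched
assert img[1, 2, 0] != -1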
test
|
move_detections
|
Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return:
|
opendatalake/detection/utils.py
|
def move_detections(label, dy, dx):
"""
Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return:
"""
for k in label.keys():
if k.startswith("detection"):
detections = label[k]
for detection in detections:
detection.move_image(-dx, -dy)
|
def move_detections(label, dy, dx):
"""
Move detections in direction dx, dy.
:param label: The label dict containing all detection lists.
:param dy: The delta in y direction as a number.
:param dx: The delta in x direction as a number.
:return:
"""
for k in label.keys():
if k.startswith("detection"):
detections = label[k]
for detection in detections:
detection.move_image(-dx, -dy)
|
[
"Move",
"detections",
"in",
"direction",
"dx",
"dy",
"."
] |
penguinmenac3/opendatalake
|
python
|
https://github.com/penguinmenac3/opendatalake/blob/77c888377095e1812a16982c8efbd2f6b1697a33/opendatalake/detection/utils.py#L771-L784
|
[
"def",
"move_detections",
"(",
"label",
",",
"dy",
",",
"dx",
")",
":",
"for",
"k",
"in",
"label",
".",
"keys",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"\"detection\"",
")",
":",
"detections",
"=",
"label",
"[",
"k",
"]",
"for",
"detection",
"in",
"detections",
":",
"detection",
".",
"move_image",
"(",
"-",
"dx",
",",
"-",
"dy",
")"
] |
77c888377095e1812a16982c8efbd2f6b1697a33
|
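Note the sign convention: move_detections(label, dy, dx) forwards to detection.move_image(-dx, -dy), so detections shift opposite to the stated direction and the x/y order flips between the two signatures. A stub Detection (an assumption; the real class lives elsewhere in opendatalake) makes that concrete:

# Stub Detection to illustrate move_detections' sign convention; the
# stub is an assumption, not the project's real Detection class.
class Detection(object):
    def __init__(self, cx, cy):
        self.cx, self.cy = cx, cy

    def move_image(self, dx, dy):
        self.cx += dx
        self.cy += dy

def move_detections(label, dy, dx):
    for k in label.keys():
        if k.startswith("detection"):
            for detection in label[k]:
                detection.move_image(-dx, -dy)

label = {"detections_2d": [Detection(cx=10, cy=20)]}
move_detections(label, dy=3, dx=5)
det = label["detections_2d"][0]
assert (det.cx, det.cy) == (5, 17)   # shifted by (-dx, -dy)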
test
|
hflip_detections
|
Horizontally flip detections according to an image flip.
:param label: The label dict containing all detection lists.
:param w: The width of the image as a number.
:return:
|
opendatalake/detection/utils.py
|
def hflip_detections(label, w):
"""
Horizontally flip detections according to an image flip.
:param label: The label dict containing all detection lists.
:param w: The width of the image as a number.
:return:
"""
for k in label.keys():
if k.startswith("detection"):
detections = label[k]
for detection in detections:
detection.cx = w - detection.cx
if k == "detections_2.5d":
detection.theta = math.pi - detection.theta
|
def hflip_detections(label, w):
"""
Horizontally flip detections according to an image flip.
:param label: The label dict containing all detection lists.
:param w: The width of the image as a number.
:return:
"""
for k in label.keys():
if k.startswith("detection"):
detections = label[k]
for detection in detections:
detection.cx = w - detection.cx
if k == "detections_2.5d":
detection.theta = math.pi - detection.theta
|
[
"Horizontally",
"flip",
"detections",
"according",
"to",
"an",
"image",
"flip",
"."
] |
penguinmenac3/opendatalake
|
python
|
https://github.com/penguinmenac3/opendatalake/blob/77c888377095e1812a16982c8efbd2f6b1697a33/opendatalake/detection/utils.py#L787-L801
|
[
"def",
"hflip_detections",
"(",
"label",
",",
"w",
")",
":",
"for",
"k",
"in",
"label",
".",
"keys",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"\"detection\"",
")",
":",
"detections",
"=",
"label",
"[",
"k",
"]",
"for",
"detection",
"in",
"detections",
":",
"detection",
".",
"cx",
"=",
"w",
"-",
"detection",
".",
"cx",
"if",
"k",
"==",
"\"detections_2.5d\"",
":",
"detection",
".",
"theta",
"=",
"math",
".",
"pi",
"-",
"detection",
".",
"theta"
] |
77c888377095e1812a16982c8efbd2f6b1697a33
|
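The flip reduces to two formulas: cx' = w - cx for every detection, plus theta' = pi - theta for the 2.5d ones. A quick check with the same kind of stub (again an assumption):

# Verifying the mirror formulas used by hflip_detections.
import math

class Detection(object):
    def __init__(self, cx, theta=0.0):
        self.cx, self.theta = cx, theta

det = Detection(cx=30, theta=math.pi / 4)
w = 100
det.cx = w - det.cx                    # cx' = w - cx
det.theta = math.pi - det.theta        # theta' = pi - theta (2.5d only)
assert det.cx == 70
assert abs(det.theta - 3 * math.pi / 4) < 1e-9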
test
|
augment_detections
|
Augment the detection dataset.
In your hyper_parameters.problem.augmentation add configurations to enable features.
Supports "enable_horizontal_flip", "enable_micro_translation", "random_crop" : {"shape": { "width", "height" }}
and "enable_texture_augmentation". Make sure to also set the "steps" otherwise this method will not be used properly.
Random crop ensures at least one detection is in the crop region.
Sample configuration
"problem": {
"augmentation": {
"steps": 40,
"enable_texture_augmentation": true,
"enable_micro_translation": true,
"enable_horizontal_flip": true,
"random_crop": {
"shape": {
"width": 256,
"height": 256
}
}
}
}
:param hyper_params: The hyper parameters object
:param feature: A dict containing all features, must be in the style created by detection datasets.
:param label: A label dict in the detection dataset style.
:return: Modified feature and label dict (copied & modified).
|
opendatalake/detection/utils.py
|
def augment_detections(hyper_params, feature, label):
"""
Augment the detection dataset.
In your hyper_parameters.problem.augmentation add configurations to enable features.
Supports "enable_horizontal_flip", "enable_micro_translation", "random_crop" : {"shape": { "width", "height" }}
and "enable_texture_augmentation". Make sure to also set the "steps" otherwise this method will not be used properly.
Random crop ensures at least one detection is in the crop region.
Sample configuration
"problem": {
"augmentation": {
"steps": 40,
"enable_texture_augmentation": true,
"enable_micro_translation": true,
"enable_horizontal_flip": true,
"random_crop": {
"shape": {
"width": 256,
"height": 256
}
}
}
}
:param hyper_params: The hyper parameters object
:param feature: A dict containing all features, must be in the style created by detection datasets.
:param label: A label dict in the detection dataset style.
:return: Modified feature and label dict (copied & modified).
"""
# Do not augment these ways:
# 1) Rotation is not possible
    # 2) Scaling is not possible, because it ruins depth perception
# However, random crops can improve performance. (Training speed and accuracy)
if hyper_params.problem.get("augmentation", None) is None:
return feature, label
img_h, img_w, img_c = feature["image"].shape
augmented_feature = {}
augmented_label = {}
augmented_feature["image"] = feature["image"].copy()
if "depth" in feature:
augmented_feature["depth"] = feature["depth"].copy()
if "calibration" in feature:
augmented_feature["calibration"] = feature["calibration"]
augmented_feature["hflipped"] = np.array([0], dtype=np.uint8)
augmented_feature["crop_offset"] = np.array([0, 0], dtype=np.int8)
for k in label.keys():
augmented_label[k] = [detection.copy() for detection in label[k]]
if hyper_params.problem.augmentation.get("enable_horizontal_flip", False):
if random.random() < 0.5:
img_h, img_w, img_c = augmented_feature["image"].shape
augmented_feature["image"] = np.fliplr(augmented_feature["image"])
if "depth" in feature:
augmented_feature["depth"] = np.fliplr(augmented_feature["depth"])
augmented_feature["hflipped"][0] = 1
hflip_detections(augmented_label, img_w)
if hyper_params.problem.augmentation.get("enable_micro_translation", False):
img_h, img_w, img_c = augmented_feature["image"].shape
dx = int(random.random() * 3)
dy = int(random.random() * 3)
augmented_feature["image"] = crop_image(augmented_feature["image"], dy, dx, img_h - dy, img_w - dx)
if "depth" in feature:
augmented_feature["depth"] = crop_image(augmented_feature["depth"], dy, dx, img_h - dy, img_w - dx)
augmented_feature["crop_offset"][0] += dy
augmented_feature["crop_offset"][1] += dx
move_detections(augmented_label, -dy, -dx)
if hyper_params.problem.augmentation.get("random_crop", None) is not None:
img_h, img_w, img_c = augmented_feature["image"].shape
target_w = hyper_params.problem.augmentation.random_crop.shape.width
target_h = hyper_params.problem.augmentation.random_crop.shape.height
delta_x = max(int(math.ceil((target_w + 1 - img_w) / 2)), 0)
delta_y = max(int(math.ceil((target_h + 1 - img_h) / 2)), 0)
move_detections(augmented_label, delta_y, delta_x)
augmented_feature["image"] = cv2.copyMakeBorder(augmented_feature["image"],
delta_y, delta_y, delta_x, delta_x,
cv2.BORDER_CONSTANT)
img_h, img_w, img_c = augmented_feature["image"].shape
start_x = 0
start_y = 0
if len(augmented_label["detections_2d"]) != 0:
idx = random.randint(0, len(augmented_label["detections_2d"]) - 1)
detection = augmented_label["detections_2d"][idx]
start_x = int(detection.cx - random.random() * (target_w - 20) / 2.0 - 10)
start_y = int(detection.cy - random.random() * (target_h - 20) / 2.0 - 10)
else:
start_x = int(img_w * random.random())
start_y = int(img_h * random.random())
        # Compute start point so that crop fits into image and random crop contains detection
if start_x < 0:
start_x = 0
if start_y < 0:
start_y = 0
if start_x >= img_w - target_w:
start_x = img_w - target_w - 1
if start_y >= img_h - target_h:
start_y = img_h - target_h - 1
# Crop image
augmented_feature["image"] = crop_image(augmented_feature["image"], start_y, start_x, target_h, target_w)
if "depth" in feature:
augmented_feature["depth"] = crop_image(augmented_feature["depth"], start_y, start_x, target_h, target_w)
augmented_feature["crop_offset"][0] += start_y
augmented_feature["crop_offset"][1] += start_x
# Crop labels
move_detections(augmented_label, -start_y, -start_x)
if hyper_params.problem.augmentation.get("enable_texture_augmentation", False):
if random.random() < 0.5:
augmented_feature["image"] = full_texture_augmentation(augmented_feature["image"])
return augmented_feature, augmented_label
|
def augment_detections(hyper_params, feature, label):
"""
Augment the detection dataset.
In your hyper_parameters.problem.augmentation add configurations to enable features.
Supports "enable_horizontal_flip", "enable_micro_translation", "random_crop" : {"shape": { "width", "height" }}
and "enable_texture_augmentation". Make sure to also set the "steps" otherwise this method will not be used properly.
Random crop ensures at least one detection is in the crop region.
Sample configuration
"problem": {
"augmentation": {
"steps": 40,
"enable_texture_augmentation": true,
"enable_micro_translation": true,
"enable_horizontal_flip": true,
"random_crop": {
"shape": {
"width": 256,
"height": 256
}
}
}
}
:param hyper_params: The hyper parameters object
:param feature: A dict containing all features, must be in the style created by detection datasets.
:param label: A label dict in the detection dataset style.
:return: Modified feature and label dict (copied & modified).
"""
# Do not augment these ways:
# 1) Rotation is not possible
    # 2) Scaling is not possible, because it ruins depth perception
# However, random crops can improve performance. (Training speed and accuracy)
if hyper_params.problem.get("augmentation", None) is None:
return feature, label
img_h, img_w, img_c = feature["image"].shape
augmented_feature = {}
augmented_label = {}
augmented_feature["image"] = feature["image"].copy()
if "depth" in feature:
augmented_feature["depth"] = feature["depth"].copy()
if "calibration" in feature:
augmented_feature["calibration"] = feature["calibration"]
augmented_feature["hflipped"] = np.array([0], dtype=np.uint8)
augmented_feature["crop_offset"] = np.array([0, 0], dtype=np.int8)
for k in label.keys():
augmented_label[k] = [detection.copy() for detection in label[k]]
if hyper_params.problem.augmentation.get("enable_horizontal_flip", False):
if random.random() < 0.5:
img_h, img_w, img_c = augmented_feature["image"].shape
augmented_feature["image"] = np.fliplr(augmented_feature["image"])
if "depth" in feature:
augmented_feature["depth"] = np.fliplr(augmented_feature["depth"])
augmented_feature["hflipped"][0] = 1
hflip_detections(augmented_label, img_w)
if hyper_params.problem.augmentation.get("enable_micro_translation", False):
img_h, img_w, img_c = augmented_feature["image"].shape
dx = int(random.random() * 3)
dy = int(random.random() * 3)
augmented_feature["image"] = crop_image(augmented_feature["image"], dy, dx, img_h - dy, img_w - dx)
if "depth" in feature:
augmented_feature["depth"] = crop_image(augmented_feature["depth"], dy, dx, img_h - dy, img_w - dx)
augmented_feature["crop_offset"][0] += dy
augmented_feature["crop_offset"][1] += dx
move_detections(augmented_label, -dy, -dx)
if hyper_params.problem.augmentation.get("random_crop", None) is not None:
img_h, img_w, img_c = augmented_feature["image"].shape
target_w = hyper_params.problem.augmentation.random_crop.shape.width
target_h = hyper_params.problem.augmentation.random_crop.shape.height
delta_x = max(int(math.ceil((target_w + 1 - img_w) / 2)), 0)
delta_y = max(int(math.ceil((target_h + 1 - img_h) / 2)), 0)
move_detections(augmented_label, delta_y, delta_x)
augmented_feature["image"] = cv2.copyMakeBorder(augmented_feature["image"],
delta_y, delta_y, delta_x, delta_x,
cv2.BORDER_CONSTANT)
img_h, img_w, img_c = augmented_feature["image"].shape
start_x = 0
start_y = 0
if len(augmented_label["detections_2d"]) != 0:
idx = random.randint(0, len(augmented_label["detections_2d"]) - 1)
detection = augmented_label["detections_2d"][idx]
start_x = int(detection.cx - random.random() * (target_w - 20) / 2.0 - 10)
start_y = int(detection.cy - random.random() * (target_h - 20) / 2.0 - 10)
else:
start_x = int(img_w * random.random())
start_y = int(img_h * random.random())
        # Compute start point so that crop fits into image and random crop contains detection
if start_x < 0:
start_x = 0
if start_y < 0:
start_y = 0
if start_x >= img_w - target_w:
start_x = img_w - target_w - 1
if start_y >= img_h - target_h:
start_y = img_h - target_h - 1
# Crop image
augmented_feature["image"] = crop_image(augmented_feature["image"], start_y, start_x, target_h, target_w)
if "depth" in feature:
augmented_feature["depth"] = crop_image(augmented_feature["depth"], start_y, start_x, target_h, target_w)
augmented_feature["crop_offset"][0] += start_y
augmented_feature["crop_offset"][1] += start_x
# Crop labels
move_detections(augmented_label, -start_y, -start_x)
if hyper_params.problem.augmentation.get("enable_texture_augmentation", False):
if random.random() < 0.5:
augmented_feature["image"] = full_texture_augmentation(augmented_feature["image"])
return augmented_feature, augmented_label
|
[
"Augment",
"the",
"detection",
"dataset",
"."
] |
penguinmenac3/opendatalake
|
python
|
https://github.com/penguinmenac3/opendatalake/blob/77c888377095e1812a16982c8efbd2f6b1697a33/opendatalake/detection/utils.py#L804-L926
|
[
"def",
"augment_detections",
"(",
"hyper_params",
",",
"feature",
",",
"label",
")",
":",
"# Do not augment these ways:",
"# 1) Rotation is not possible",
"# 3) Scaling is not possible, because it ruins depth perception",
"# However, random crops can improve performance. (Training speed and accuracy)",
"if",
"hyper_params",
".",
"problem",
".",
"get",
"(",
"\"augmentation\"",
",",
"None",
")",
"is",
"None",
":",
"return",
"feature",
",",
"label",
"img_h",
",",
"img_w",
",",
"img_c",
"=",
"feature",
"[",
"\"image\"",
"]",
".",
"shape",
"augmented_feature",
"=",
"{",
"}",
"augmented_label",
"=",
"{",
"}",
"augmented_feature",
"[",
"\"image\"",
"]",
"=",
"feature",
"[",
"\"image\"",
"]",
".",
"copy",
"(",
")",
"if",
"\"depth\"",
"in",
"feature",
":",
"augmented_feature",
"[",
"\"depth\"",
"]",
"=",
"feature",
"[",
"\"depth\"",
"]",
".",
"copy",
"(",
")",
"if",
"\"calibration\"",
"in",
"feature",
":",
"augmented_feature",
"[",
"\"calibration\"",
"]",
"=",
"feature",
"[",
"\"calibration\"",
"]",
"augmented_feature",
"[",
"\"hflipped\"",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"augmented_feature",
"[",
"\"crop_offset\"",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"for",
"k",
"in",
"label",
".",
"keys",
"(",
")",
":",
"augmented_label",
"[",
"k",
"]",
"=",
"[",
"detection",
".",
"copy",
"(",
")",
"for",
"detection",
"in",
"label",
"[",
"k",
"]",
"]",
"if",
"hyper_params",
".",
"problem",
".",
"augmentation",
".",
"get",
"(",
"\"enable_horizontal_flip\"",
",",
"False",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"0.5",
":",
"img_h",
",",
"img_w",
",",
"img_c",
"=",
"augmented_feature",
"[",
"\"image\"",
"]",
".",
"shape",
"augmented_feature",
"[",
"\"image\"",
"]",
"=",
"np",
".",
"fliplr",
"(",
"augmented_feature",
"[",
"\"image\"",
"]",
")",
"if",
"\"depth\"",
"in",
"feature",
":",
"augmented_feature",
"[",
"\"depth\"",
"]",
"=",
"np",
".",
"fliplr",
"(",
"augmented_feature",
"[",
"\"depth\"",
"]",
")",
"augmented_feature",
"[",
"\"hflipped\"",
"]",
"[",
"0",
"]",
"=",
"1",
"hflip_detections",
"(",
"augmented_label",
",",
"img_w",
")",
"if",
"hyper_params",
".",
"problem",
".",
"augmentation",
".",
"get",
"(",
"\"enable_micro_translation\"",
",",
"False",
")",
":",
"img_h",
",",
"img_w",
",",
"img_c",
"=",
"augmented_feature",
"[",
"\"image\"",
"]",
".",
"shape",
"dx",
"=",
"int",
"(",
"random",
".",
"random",
"(",
")",
"*",
"3",
")",
"dy",
"=",
"int",
"(",
"random",
".",
"random",
"(",
")",
"*",
"3",
")",
"augmented_feature",
"[",
"\"image\"",
"]",
"=",
"crop_image",
"(",
"augmented_feature",
"[",
"\"image\"",
"]",
",",
"dy",
",",
"dx",
",",
"img_h",
"-",
"dy",
",",
"img_w",
"-",
"dx",
")",
"if",
"\"depth\"",
"in",
"feature",
":",
"augmented_feature",
"[",
"\"depth\"",
"]",
"=",
"crop_image",
"(",
"augmented_feature",
"[",
"\"depth\"",
"]",
",",
"dy",
",",
"dx",
",",
"img_h",
"-",
"dy",
",",
"img_w",
"-",
"dx",
")",
"augmented_feature",
"[",
"\"crop_offset\"",
"]",
"[",
"0",
"]",
"+=",
"dy",
"augmented_feature",
"[",
"\"crop_offset\"",
"]",
"[",
"1",
"]",
"+=",
"dx",
"move_detections",
"(",
"augmented_label",
",",
"-",
"dy",
",",
"-",
"dx",
")",
"if",
"hyper_params",
".",
"problem",
".",
"augmentation",
".",
"get",
"(",
"\"random_crop\"",
",",
"None",
")",
"is",
"not",
"None",
":",
"img_h",
",",
"img_w",
",",
"img_c",
"=",
"augmented_feature",
"[",
"\"image\"",
"]",
".",
"shape",
"target_w",
"=",
"hyper_params",
".",
"problem",
".",
"augmentation",
".",
"random_crop",
".",
"shape",
".",
"width",
"target_h",
"=",
"hyper_params",
".",
"problem",
".",
"augmentation",
".",
"random_crop",
".",
"shape",
".",
"height",
"delta_x",
"=",
"max",
"(",
"int",
"(",
"math",
".",
"ceil",
"(",
"(",
"target_w",
"+",
"1",
"-",
"img_w",
")",
"/",
"2",
")",
")",
",",
"0",
")",
"delta_y",
"=",
"max",
"(",
"int",
"(",
"math",
".",
"ceil",
"(",
"(",
"target_h",
"+",
"1",
"-",
"img_h",
")",
"/",
"2",
")",
")",
",",
"0",
")",
"move_detections",
"(",
"augmented_label",
",",
"delta_y",
",",
"delta_x",
")",
"augmented_feature",
"[",
"\"image\"",
"]",
"=",
"cv2",
".",
"copyMakeBorder",
"(",
"augmented_feature",
"[",
"\"image\"",
"]",
",",
"delta_y",
",",
"delta_y",
",",
"delta_x",
",",
"delta_x",
",",
"cv2",
".",
"BORDER_CONSTANT",
")",
"img_h",
",",
"img_w",
",",
"img_c",
"=",
"augmented_feature",
"[",
"\"image\"",
"]",
".",
"shape",
"start_x",
"=",
"0",
"start_y",
"=",
"0",
"if",
"len",
"(",
"augmented_label",
"[",
"\"detections_2d\"",
"]",
")",
"!=",
"0",
":",
"idx",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"augmented_label",
"[",
"\"detections_2d\"",
"]",
")",
"-",
"1",
")",
"detection",
"=",
"augmented_label",
"[",
"\"detections_2d\"",
"]",
"[",
"idx",
"]",
"start_x",
"=",
"int",
"(",
"detection",
".",
"cx",
"-",
"random",
".",
"random",
"(",
")",
"*",
"(",
"target_w",
"-",
"20",
")",
"/",
"2.0",
"-",
"10",
")",
"start_y",
"=",
"int",
"(",
"detection",
".",
"cy",
"-",
"random",
".",
"random",
"(",
")",
"*",
"(",
"target_h",
"-",
"20",
")",
"/",
"2.0",
"-",
"10",
")",
"else",
":",
"start_x",
"=",
"int",
"(",
"img_w",
"*",
"random",
".",
"random",
"(",
")",
")",
"start_y",
"=",
"int",
"(",
"img_h",
"*",
"random",
".",
"random",
"(",
")",
")",
"# Compute start point so that crop fit's into image and random crop contains detection",
"if",
"start_x",
"<",
"0",
":",
"start_x",
"=",
"0",
"if",
"start_y",
"<",
"0",
":",
"start_y",
"=",
"0",
"if",
"start_x",
">=",
"img_w",
"-",
"target_w",
":",
"start_x",
"=",
"img_w",
"-",
"target_w",
"-",
"1",
"if",
"start_y",
">=",
"img_h",
"-",
"target_h",
":",
"start_y",
"=",
"img_h",
"-",
"target_h",
"-",
"1",
"# Crop image",
"augmented_feature",
"[",
"\"image\"",
"]",
"=",
"crop_image",
"(",
"augmented_feature",
"[",
"\"image\"",
"]",
",",
"start_y",
",",
"start_x",
",",
"target_h",
",",
"target_w",
")",
"if",
"\"depth\"",
"in",
"feature",
":",
"augmented_feature",
"[",
"\"depth\"",
"]",
"=",
"crop_image",
"(",
"augmented_feature",
"[",
"\"depth\"",
"]",
",",
"start_y",
",",
"start_x",
",",
"target_h",
",",
"target_w",
")",
"augmented_feature",
"[",
"\"crop_offset\"",
"]",
"[",
"0",
"]",
"+=",
"start_y",
"augmented_feature",
"[",
"\"crop_offset\"",
"]",
"[",
"1",
"]",
"+=",
"start_x",
"# Crop labels",
"move_detections",
"(",
"augmented_label",
",",
"-",
"start_y",
",",
"-",
"start_x",
")",
"if",
"hyper_params",
".",
"problem",
".",
"augmentation",
".",
"get",
"(",
"\"enable_texture_augmentation\"",
",",
"False",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"0.5",
":",
"augmented_feature",
"[",
"\"image\"",
"]",
"=",
"full_texture_augmentation",
"(",
"augmented_feature",
"[",
"\"image\"",
"]",
")",
"return",
"augmented_feature",
",",
"augmented_label"
] |
77c888377095e1812a16982c8efbd2f6b1697a33
|
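augment_detections reads hyper_params both through attribute chains (hyper_params.problem.augmentation.random_crop.shape.width) and through dict-style .get(...), so neither a plain dict nor a plain namespace satisfies it. One minimal hybrid that does, purely an assumption for exercising the function in isolation (the real project presumably ships its own config object):

# Attribute/dict hybrid for hyper_params; an illustrative assumption only.
class Config(dict):
    def __getattr__(self, name):
        value = self[name]
        return Config(value) if isinstance(value, dict) else value

hyper_params = Config({
    "problem": {
        "augmentation": {
            "steps": 40,
            "enable_horizontal_flip": True,
            "enable_micro_translation": True,
            "random_crop": {"shape": {"width": 256, "height": 256}},
        }
    }
})

assert hyper_params.problem.augmentation.random_crop.shape.width == 256
assert hyper_params.problem.get("augmentation") is not None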
test
|
get_dict_from_obj
|
Edit to get the dict even when the object is a GenericRelatedObjectManager.
Added the try except.
|
inplaceeditform/commons.py
|
def get_dict_from_obj(obj):
'''
Edit to get the dict even when the object is a GenericRelatedObjectManager.
Added the try except.
'''
obj_dict = obj.__dict__
obj_dict_result = obj_dict.copy()
for key, value in obj_dict.items():
if key.endswith('_id'):
key2 = key.replace('_id', '')
try:
field, model, direct, m2m = obj._meta.get_field_by_name(key2)
if isinstance(field, ForeignKey):
obj_dict_result[key2] = obj_dict_result[key]
del obj_dict_result[key]
except FieldDoesNotExist:
pass
manytomany_list = obj._meta.many_to_many
for manytomany in manytomany_list:
pks = [obj_rel.pk for obj_rel in manytomany.value_from_object(obj).select_related()]
if pks:
obj_dict_result[manytomany.name] = pks
return obj_dict_result
|
def get_dict_from_obj(obj):
'''
Edit to get the dict even when the object is a GenericRelatedObjectManager.
Added the try except.
'''
obj_dict = obj.__dict__
obj_dict_result = obj_dict.copy()
for key, value in obj_dict.items():
if key.endswith('_id'):
key2 = key.replace('_id', '')
try:
field, model, direct, m2m = obj._meta.get_field_by_name(key2)
if isinstance(field, ForeignKey):
obj_dict_result[key2] = obj_dict_result[key]
del obj_dict_result[key]
except FieldDoesNotExist:
pass
manytomany_list = obj._meta.many_to_many
for manytomany in manytomany_list:
pks = [obj_rel.pk for obj_rel in manytomany.value_from_object(obj).select_related()]
if pks:
obj_dict_result[manytomany.name] = pks
return obj_dict_result
|
[
"Edit",
"to",
"get",
"the",
"dict",
"even",
"when",
"the",
"object",
"is",
"a",
"GenericRelatedObjectManager",
".",
"Added",
"the",
"try",
"except",
"."
] |
django-inplaceedit/django-inplaceedit
|
python
|
https://github.com/django-inplaceedit/django-inplaceedit/blob/7ba18e7906f56c56395ca07e2486755062efce00/inplaceeditform/commons.py#L35-L57
|
[
"def",
"get_dict_from_obj",
"(",
"obj",
")",
":",
"obj_dict",
"=",
"obj",
".",
"__dict__",
"obj_dict_result",
"=",
"obj_dict",
".",
"copy",
"(",
")",
"for",
"key",
",",
"value",
"in",
"obj_dict",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"endswith",
"(",
"'_id'",
")",
":",
"key2",
"=",
"key",
".",
"replace",
"(",
"'_id'",
",",
"''",
")",
"try",
":",
"field",
",",
"model",
",",
"direct",
",",
"m2m",
"=",
"obj",
".",
"_meta",
".",
"get_field_by_name",
"(",
"key2",
")",
"if",
"isinstance",
"(",
"field",
",",
"ForeignKey",
")",
":",
"obj_dict_result",
"[",
"key2",
"]",
"=",
"obj_dict_result",
"[",
"key",
"]",
"del",
"obj_dict_result",
"[",
"key",
"]",
"except",
"FieldDoesNotExist",
":",
"pass",
"manytomany_list",
"=",
"obj",
".",
"_meta",
".",
"many_to_many",
"for",
"manytomany",
"in",
"manytomany_list",
":",
"pks",
"=",
"[",
"obj_rel",
".",
"pk",
"for",
"obj_rel",
"in",
"manytomany",
".",
"value_from_object",
"(",
"obj",
")",
".",
"select_related",
"(",
")",
"]",
"if",
"pks",
":",
"obj_dict_result",
"[",
"manytomany",
".",
"name",
"]",
"=",
"pks",
"return",
"obj_dict_result"
] |
7ba18e7906f56c56395ca07e2486755062efce00
|
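One portability caveat: Model._meta.get_field_by_name was deprecated in Django 1.8 and removed in 1.10, so the try/except above only runs on old Django. On modern versions the equivalent foreign-key check is a sketch like this (it needs a configured Django project to execute):

# Modern-Django equivalent of the get_field_by_name check above.
from django.core.exceptions import FieldDoesNotExist
from django.db.models import ForeignKey

def is_foreign_key(obj, field_name):
    try:
        field = obj._meta.get_field(field_name)
    except FieldDoesNotExist:
        return False
    return isinstance(field, ForeignKey)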
test
|
BaseAdaptorField.get_config
|
Get the arguments given to the template tag element and complete these
with the ones from the settings.py if necessary.
|
inplaceeditform/fields.py
|
def get_config(self, request, **kwargs):
"""
Get the arguments given to the template tag element and complete these
with the ones from the settings.py if necessary.
"""
config = kwargs
config_from_settings = deepcopy(inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS)
config_one_by_one = inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE
if not config_one_by_one:
# Solution 1: Using default config only if none specified.
if not config and config_from_settings:
config = config_from_settings
else:
# Solution 2: Updating the configured config with the default one.
config = dict(config_from_settings, **config)
return config
|
def get_config(self, request, **kwargs):
"""
Get the arguments given to the template tag element and complete these
with the ones from the settings.py if necessary.
"""
config = kwargs
config_from_settings = deepcopy(inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS)
config_one_by_one = inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE
if not config_one_by_one:
# Solution 1: Using default config only if none specified.
if not config and config_from_settings:
config = config_from_settings
else:
# Solution 2: Updating the configured config with the default one.
config = dict(config_from_settings, **config)
return config
|
[
"Get",
"the",
"arguments",
"given",
"to",
"the",
"template",
"tag",
"element",
"and",
"complete",
"these",
"with",
"the",
"ones",
"from",
"the",
"settings",
".",
"py",
"if",
"necessary",
"."
] |
django-inplaceedit/django-inplaceedit
|
python
|
https://github.com/django-inplaceedit/django-inplaceedit/blob/7ba18e7906f56c56395ca07e2486755062efce00/inplaceeditform/fields.py#L92-L109
|
[
"def",
"get_config",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"kwargs",
"config_from_settings",
"=",
"deepcopy",
"(",
"inplace_settings",
".",
"DEFAULT_INPLACE_EDIT_OPTIONS",
")",
"config_one_by_one",
"=",
"inplace_settings",
".",
"DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE",
"if",
"not",
"config_one_by_one",
":",
"# Solution 1: Using default config only if none specified.",
"if",
"not",
"config",
"and",
"config_from_settings",
":",
"config",
"=",
"config_from_settings",
"else",
":",
"# Solution 2: Updating the configured config with the default one.",
"config",
"=",
"dict",
"(",
"config_from_settings",
",",
"*",
"*",
"config",
")",
"return",
"config"
] |
7ba18e7906f56c56395ca07e2486755062efce00
|
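The two merge strategies in get_config reduce to plain dict operations; the option names below are illustrative, not real inplaceedit settings:

# Solution 1: defaults are used only when nothing was passed in.
defaults = {"auto_height": 1, "auto_width": 1}
overrides = {"auto_width": 0}
config = overrides or defaults
assert config == {"auto_width": 0}

# Solution 2: per-key merge; explicit options win over defaults.
config = dict(defaults, **overrides)
assert config == {"auto_height": 1, "auto_width": 0}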
test
|
BaseAdaptorField.empty_value
|
Get the text to display when the field is empty.
|
inplaceeditform/fields.py
|
def empty_value(self):
'''
Get the text to display when the field is empty.
'''
edit_empty_value = self.config.get('edit_empty_value', False)
if edit_empty_value:
return edit_empty_value
else:
return unicode(inplace_settings.INPLACEEDIT_EDIT_EMPTY_VALUE)
|
def empty_value(self):
'''
Get the text to display when the field is empty.
'''
edit_empty_value = self.config.get('edit_empty_value', False)
if edit_empty_value:
return edit_empty_value
else:
return unicode(inplace_settings.INPLACEEDIT_EDIT_EMPTY_VALUE)
|
[
"Get",
"the",
"text",
"to",
"display",
"when",
"the",
"field",
"is",
"empty",
"."
] |
django-inplaceedit/django-inplaceedit
|
python
|
https://github.com/django-inplaceedit/django-inplaceedit/blob/7ba18e7906f56c56395ca07e2486755062efce00/inplaceeditform/fields.py#L143-L151
|
[
"def",
"empty_value",
"(",
"self",
")",
":",
"edit_empty_value",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'edit_empty_value'",
",",
"False",
")",
"if",
"edit_empty_value",
":",
"return",
"edit_empty_value",
"else",
":",
"return",
"unicode",
"(",
"inplace_settings",
".",
"INPLACEEDIT_EDIT_EMPTY_VALUE",
")"
] |
7ba18e7906f56c56395ca07e2486755062efce00
|
test
|
do_eval
|
Usage: {% eval %}1 + 1{% endeval %}
|
inplaceeditform/templatetags/inplace_edit.py
|
def do_eval(parser, token):
"Usage: {% eval %}1 + 1{% endeval %}"
nodelist = parser.parse(('endeval',))
class EvalNode(template.Node):
def render(self, context):
return template.Template(nodelist.render(context)).render(template.Context(context))
parser.delete_first_token()
return EvalNode()
|
def do_eval(parser, token):
"Usage: {% eval %}1 + 1{% endeval %}"
nodelist = parser.parse(('endeval',))
class EvalNode(template.Node):
def render(self, context):
return template.Template(nodelist.render(context)).render(template.Context(context))
parser.delete_first_token()
return EvalNode()
|
[
"Usage",
":",
"{",
"%",
"eval",
"%",
"}",
"1",
"+",
"1",
"{",
"%",
"endeval",
"%",
"}"
] |
django-inplaceedit/django-inplaceedit
|
python
|
https://github.com/django-inplaceedit/django-inplaceedit/blob/7ba18e7906f56c56395ca07e2486755062efce00/inplaceeditform/templatetags/inplace_edit.py#L123-L132
|
[
"def",
"do_eval",
"(",
"parser",
",",
"token",
")",
":",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"'endeval'",
",",
")",
")",
"class",
"EvalNode",
"(",
"template",
".",
"Node",
")",
":",
"def",
"render",
"(",
"self",
",",
"context",
")",
":",
"return",
"template",
".",
"Template",
"(",
"nodelist",
".",
"render",
"(",
"context",
")",
")",
".",
"render",
"(",
"template",
".",
"Context",
"(",
"context",
")",
")",
"parser",
".",
"delete_first_token",
"(",
")",
"return",
"EvalNode",
"(",
")"
] |
7ba18e7906f56c56395ca07e2486755062efce00
|
test
|
parse_args_kwargs
|
Parse uniformly args and kwargs from a templatetag
Usage::
For parsing a template like this:
{% footag my_contents,height=10,zoom=20 as myvar %}
You simply do this:
@register.tag
def footag(parser, token):
args, kwargs = parse_args_kwargs(parser, token)
|
inplaceeditform/tag_utils.py
|
def parse_args_kwargs(parser, token):
"""
Parse uniformly args and kwargs from a templatetag
Usage::
For parsing a template like this:
{% footag my_contents,height=10,zoom=20 as myvar %}
You simply do this:
@register.tag
def footag(parser, token):
args, kwargs = parse_args_kwargs(parser, token)
"""
bits = token.contents.split(' ')
if len(bits) <= 1:
raise template.TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
if token.contents[13] == '"':
end_quote = token.contents.index('"', 14) + 1
args = [template.Variable(token.contents[13:end_quote])]
kwargs_start = end_quote
else:
try:
next_space = token.contents.index(' ', 14)
kwargs_start = next_space + 1
except ValueError:
next_space = None
kwargs_start = None
args = [template.Variable(token.contents[13:next_space])]
kwargs = {}
kwargs_list = token.contents[kwargs_start:].split(',')
for kwargs_item in kwargs_list:
if '=' in kwargs_item:
k, v = kwargs_item.split('=', 1)
k = k.strip()
kwargs[k] = template.Variable(v)
return args, kwargs
|
def parse_args_kwargs(parser, token):
"""
Parse uniformly args and kwargs from a templatetag
Usage::
For parsing a template like this:
{% footag my_contents,height=10,zoom=20 as myvar %}
You simply do this:
@register.tag
def footag(parser, token):
args, kwargs = parse_args_kwargs(parser, token)
"""
bits = token.contents.split(' ')
if len(bits) <= 1:
raise template.TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
if token.contents[13] == '"':
end_quote = token.contents.index('"', 14) + 1
args = [template.Variable(token.contents[13:end_quote])]
kwargs_start = end_quote
else:
try:
next_space = token.contents.index(' ', 14)
kwargs_start = next_space + 1
except ValueError:
next_space = None
kwargs_start = None
args = [template.Variable(token.contents[13:next_space])]
kwargs = {}
kwargs_list = token.contents[kwargs_start:].split(',')
for kwargs_item in kwargs_list:
if '=' in kwargs_item:
k, v = kwargs_item.split('=', 1)
k = k.strip()
kwargs[k] = template.Variable(v)
return args, kwargs
|
[
"Parse",
"uniformly",
"args",
"and",
"kwargs",
"from",
"a",
"templatetag"
] |
django-inplaceedit/django-inplaceedit
|
python
|
https://github.com/django-inplaceedit/django-inplaceedit/blob/7ba18e7906f56c56395ca07e2486755062efce00/inplaceeditform/tag_utils.py#L22-L63
|
[
"def",
"parse_args_kwargs",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"contents",
".",
"split",
"(",
"' '",
")",
"if",
"len",
"(",
"bits",
")",
"<=",
"1",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"'%s' takes at least one argument\"",
"%",
"bits",
"[",
"0",
"]",
")",
"if",
"token",
".",
"contents",
"[",
"13",
"]",
"==",
"'\"'",
":",
"end_quote",
"=",
"token",
".",
"contents",
".",
"index",
"(",
"'\"'",
",",
"14",
")",
"+",
"1",
"args",
"=",
"[",
"template",
".",
"Variable",
"(",
"token",
".",
"contents",
"[",
"13",
":",
"end_quote",
"]",
")",
"]",
"kwargs_start",
"=",
"end_quote",
"else",
":",
"try",
":",
"next_space",
"=",
"token",
".",
"contents",
".",
"index",
"(",
"' '",
",",
"14",
")",
"kwargs_start",
"=",
"next_space",
"+",
"1",
"except",
"ValueError",
":",
"next_space",
"=",
"None",
"kwargs_start",
"=",
"None",
"args",
"=",
"[",
"template",
".",
"Variable",
"(",
"token",
".",
"contents",
"[",
"13",
":",
"next_space",
"]",
")",
"]",
"kwargs",
"=",
"{",
"}",
"kwargs_list",
"=",
"token",
".",
"contents",
"[",
"kwargs_start",
":",
"]",
".",
"split",
"(",
"','",
")",
"for",
"kwargs_item",
"in",
"kwargs_list",
":",
"if",
"'='",
"in",
"kwargs_item",
":",
"k",
",",
"v",
"=",
"kwargs_item",
".",
"split",
"(",
"'='",
",",
"1",
")",
"k",
"=",
"k",
".",
"strip",
"(",
")",
"kwargs",
"[",
"k",
"]",
"=",
"template",
".",
"Variable",
"(",
"v",
")",
"return",
"args",
",",
"kwargs"
] |
7ba18e7906f56c56395ca07e2486755062efce00
|
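The hardcoded offsets only make sense for one tag name: 13 == len("inplace_edit ") (inferring the tag from the app name is an assumption), so token.contents[13] is the first character after the tag. A standalone check of the slicing:

# The magic 13/14 offsets, assuming the tag is named "inplace_edit".
contents = 'inplace_edit "obj.name" class_inplace="inline"'
assert len("inplace_edit ") == 13
assert contents[13] == '"'
end_quote = contents.index('"', 14) + 1
assert contents[13:end_quote] == '"obj.name"'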
test
|
PrometheusExporterScript.create_metrics
|
Create and register metrics from a list of MetricConfigs.
|
prometheus_aioexporter/script.py
|
def create_metrics(
self, metric_configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
"""Create and register metrics from a list of MetricConfigs."""
return self.registry.create_metrics(metric_configs)
|
def create_metrics(
self, metric_configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
"""Create and register metrics from a list of MetricConfigs."""
return self.registry.create_metrics(metric_configs)
|
[
"Create",
"and",
"register",
"metrics",
"from",
"a",
"list",
"of",
"MetricConfigs",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/script.py#L93-L96
|
[
"def",
"create_metrics",
"(",
"self",
",",
"metric_configs",
":",
"Iterable",
"[",
"MetricConfig",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Metric",
"]",
":",
"return",
"self",
".",
"registry",
".",
"create_metrics",
"(",
"metric_configs",
")"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
PrometheusExporterScript._setup_logging
|
Setup logging for the application and aiohttp.
|
prometheus_aioexporter/script.py
|
def _setup_logging(self, log_level: str):
"""Setup logging for the application and aiohttp."""
level = getattr(logging, log_level)
names = (
'aiohttp.access', 'aiohttp.internal', 'aiohttp.server',
'aiohttp.web', self.name)
for name in names:
setup_logger(name=name, stream=sys.stderr, level=level)
|
def _setup_logging(self, log_level: str):
"""Setup logging for the application and aiohttp."""
level = getattr(logging, log_level)
names = (
'aiohttp.access', 'aiohttp.internal', 'aiohttp.server',
'aiohttp.web', self.name)
for name in names:
setup_logger(name=name, stream=sys.stderr, level=level)
|
[
"Setup",
"logging",
"for",
"the",
"application",
"and",
"aiohttp",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/script.py#L130-L137
|
[
"def",
"_setup_logging",
"(",
"self",
",",
"log_level",
":",
"str",
")",
":",
"level",
"=",
"getattr",
"(",
"logging",
",",
"log_level",
")",
"names",
"=",
"(",
"'aiohttp.access'",
",",
"'aiohttp.internal'",
",",
"'aiohttp.server'",
",",
"'aiohttp.web'",
",",
"self",
".",
"name",
")",
"for",
"name",
"in",
"names",
":",
"setup_logger",
"(",
"name",
"=",
"name",
",",
"stream",
"=",
"sys",
".",
"stderr",
",",
"level",
"=",
"level",
")"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
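getattr(logging, log_level) maps a level name from the CLI onto logging's numeric constant; an unknown name raises AttributeError, so validation presumably happens upstream in argument parsing:

# How the string level becomes a numeric one.
import logging

assert getattr(logging, "DEBUG") == logging.DEBUG == 10
assert getattr(logging, "WARNING") == 30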
test
|
PrometheusExporterScript._configure_registry
|
Configure the MetricRegistry.
|
prometheus_aioexporter/script.py
|
def _configure_registry(self, include_process_stats: bool = False):
"""Configure the MetricRegistry."""
if include_process_stats:
self.registry.register_additional_collector(
ProcessCollector(registry=None))
|
def _configure_registry(self, include_process_stats: bool = False):
"""Configure the MetricRegistry."""
if include_process_stats:
self.registry.register_additional_collector(
ProcessCollector(registry=None))
|
[
"Configure",
"the",
"MetricRegistry",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/script.py#L139-L143
|
[
"def",
"_configure_registry",
"(",
"self",
",",
"include_process_stats",
":",
"bool",
"=",
"False",
")",
":",
"if",
"include_process_stats",
":",
"self",
".",
"registry",
".",
"register_additional_collector",
"(",
"ProcessCollector",
"(",
"registry",
"=",
"None",
")",
")"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
PrometheusExporterScript._get_exporter
|
Return a :class:`PrometheusExporter` configured with args.
|
prometheus_aioexporter/script.py
|
def _get_exporter(self, args: argparse.Namespace) -> PrometheusExporter:
"""Return a :class:`PrometheusExporter` configured with args."""
exporter = PrometheusExporter(
self.name, self.description, args.host, args.port, self.registry)
exporter.app.on_startup.append(self.on_application_startup)
exporter.app.on_shutdown.append(self.on_application_shutdown)
return exporter
|
def _get_exporter(self, args: argparse.Namespace) -> PrometheusExporter:
"""Return a :class:`PrometheusExporter` configured with args."""
exporter = PrometheusExporter(
self.name, self.description, args.host, args.port, self.registry)
exporter.app.on_startup.append(self.on_application_startup)
exporter.app.on_shutdown.append(self.on_application_shutdown)
return exporter
|
[
"Return",
"a",
":",
"class",
":",
"PrometheusExporter",
"configured",
"with",
"args",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/script.py#L145-L151
|
[
"def",
"_get_exporter",
"(",
"self",
",",
"args",
":",
"argparse",
".",
"Namespace",
")",
"->",
"PrometheusExporter",
":",
"exporter",
"=",
"PrometheusExporter",
"(",
"self",
".",
"name",
",",
"self",
".",
"description",
",",
"args",
".",
"host",
",",
"args",
".",
"port",
",",
"self",
".",
"registry",
")",
"exporter",
".",
"app",
".",
"on_startup",
".",
"append",
"(",
"self",
".",
"on_application_startup",
")",
"exporter",
".",
"app",
".",
"on_shutdown",
".",
"append",
"(",
"self",
".",
"on_application_shutdown",
")",
"return",
"exporter"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
MetricsRegistry.create_metrics
|
Create Prometheus metrics from a list of MetricConfigs.
|
prometheus_aioexporter/metric.py
|
def create_metrics(self,
configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
"""Create Prometheus metrics from a list of MetricConfigs."""
metrics: Dict[str, Metric] = {
config.name: self._register_metric(config)
for config in configs
}
self._metrics.update(metrics)
return metrics
|
def create_metrics(self,
configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
"""Create Prometheus metrics from a list of MetricConfigs."""
metrics: Dict[str, Metric] = {
config.name: self._register_metric(config)
for config in configs
}
self._metrics.update(metrics)
return metrics
|
[
"Create",
"Prometheus",
"metrics",
"from",
"a",
"list",
"of",
"MetricConfigs",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/metric.py#L80-L88
|
[
"def",
"create_metrics",
"(",
"self",
",",
"configs",
":",
"Iterable",
"[",
"MetricConfig",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Metric",
"]",
":",
"metrics",
":",
"Dict",
"[",
"str",
",",
"Metric",
"]",
"=",
"{",
"config",
".",
"name",
":",
"self",
".",
"_register_metric",
"(",
"config",
")",
"for",
"config",
"in",
"configs",
"}",
"self",
".",
"_metrics",
".",
"update",
"(",
"metrics",
")",
"return",
"metrics"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
MetricsRegistry.get_metric
|
Return a metric, optionally configured with labels.
|
prometheus_aioexporter/metric.py
|
def get_metric(
self, name: str,
labels: Union[Dict[str, str], None] = None) -> Metric:
"""Return a metric, optionally configured with labels."""
metric = self._metrics[name]
if labels:
return metric.labels(**labels)
return metric
|
def get_metric(
self, name: str,
labels: Union[Dict[str, str], None] = None) -> Metric:
"""Return a metric, optionally configured with labels."""
metric = self._metrics[name]
if labels:
return metric.labels(**labels)
return metric
|
[
"Return",
"a",
"metric",
"optionally",
"configured",
"with",
"labels",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/metric.py#L90-L98
|
[
"def",
"get_metric",
"(",
"self",
",",
"name",
":",
"str",
",",
"labels",
":",
"Union",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"None",
"]",
"=",
"None",
")",
"->",
"Metric",
":",
"metric",
"=",
"self",
".",
"_metrics",
"[",
"name",
"]",
"if",
"labels",
":",
"return",
"metric",
".",
"labels",
"(",
"*",
"*",
"labels",
")",
"return",
"metric"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
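metric.labels(**labels) is the standard prometheus_client pattern for fetching a labeled child; the registry wrapper above just forwards to it. A self-contained demo against prometheus_client directly (metric and label names are illustrative):

# Labeled-child access with prometheus_client (pip install prometheus_client).
from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()
requests = Counter(
    "sample_requests", "Sampled requests", ["method"], registry=registry)
requests.labels(method="GET").inc()          # the child is created lazily
requests.labels(**{"method": "POST"}).inc(2)
print(registry.get_sample_value(
    "sample_requests_total", {"method": "POST"}))  # -> 2.0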
test
|
PrometheusExporter.run
|
Run the :class:`aiohttp.web.Application` for the exporter.
|
prometheus_aioexporter/web.py
|
def run(self):
"""Run the :class:`aiohttp.web.Application` for the exporter."""
run_app(
self.app,
host=self.host,
port=self.port,
print=lambda *args, **kargs: None,
access_log_format='%a "%r" %s %b "%{Referrer}i" "%{User-Agent}i"')
|
def run(self):
"""Run the :class:`aiohttp.web.Application` for the exporter."""
run_app(
self.app,
host=self.host,
port=self.port,
print=lambda *args, **kargs: None,
access_log_format='%a "%r" %s %b "%{Referrer}i" "%{User-Agent}i"')
|
[
"Run",
"the",
":",
"class",
":",
"aiohttp",
".",
"web",
".",
"Application",
"for",
"the",
"exporter",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/web.py#L62-L69
|
[
"def",
"run",
"(",
"self",
")",
":",
"run_app",
"(",
"self",
".",
"app",
",",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
",",
"print",
"=",
"lambda",
"*",
"args",
",",
"*",
"*",
"kargs",
":",
"None",
",",
"access_log_format",
"=",
"'%a \"%r\" %s %b \"%{Referrer}i\" \"%{User-Agent}i\"'",
")"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
PrometheusExporter._make_application
|
Setup an :class:`aiohttp.web.Application`.
|
prometheus_aioexporter/web.py
|
def _make_application(self) -> Application:
"""Setup an :class:`aiohttp.web.Application`."""
app = Application()
app['exporter'] = self
app.router.add_get('/', self._handle_home)
app.router.add_get('/metrics', self._handle_metrics)
app.on_startup.append(self._log_startup_message)
return app
|
def _make_application(self) -> Application:
"""Setup an :class:`aiohttp.web.Application`."""
app = Application()
app['exporter'] = self
app.router.add_get('/', self._handle_home)
app.router.add_get('/metrics', self._handle_metrics)
app.on_startup.append(self._log_startup_message)
return app
|
[
"Setup",
"an",
":",
"class",
":",
"aiohttp",
".",
"web",
".",
"Application",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/web.py#L71-L78
|
[
"def",
"_make_application",
"(",
"self",
")",
"->",
"Application",
":",
"app",
"=",
"Application",
"(",
")",
"app",
"[",
"'exporter'",
"]",
"=",
"self",
"app",
".",
"router",
".",
"add_get",
"(",
"'/'",
",",
"self",
".",
"_handle_home",
")",
"app",
".",
"router",
".",
"add_get",
"(",
"'/metrics'",
",",
"self",
".",
"_handle_metrics",
")",
"app",
".",
"on_startup",
".",
"append",
"(",
"self",
".",
"_log_startup_message",
")",
"return",
"app"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
PrometheusExporter._handle_home
|
Home page request handler.
|
prometheus_aioexporter/web.py
|
async def _handle_home(self, request: Request) -> Response:
"""Home page request handler."""
if self.description:
title = f'{self.name} - {self.description}'
else:
title = self.name
text = dedent(
f'''<!DOCTYPE html>
<html>
<head>
<title>{title}</title>
</head>
<body>
<h1>{title}</h1>
<p>
            Metrics are exported at the
<a href="/metrics">/metrics</a> endpoint.
</p>
</body>
</html>
''')
return Response(content_type='text/html', text=text)
|
async def _handle_home(self, request: Request) -> Response:
"""Home page request handler."""
if self.description:
title = f'{self.name} - {self.description}'
else:
title = self.name
text = dedent(
f'''<!DOCTYPE html>
<html>
<head>
<title>{title}</title>
</head>
<body>
<h1>{title}</h1>
<p>
            Metrics are exported at the
<a href="/metrics">/metrics</a> endpoint.
</p>
</body>
</html>
''')
return Response(content_type='text/html', text=text)
|
[
"Home",
"page",
"request",
"handler",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/web.py#L84-L106
|
[
"async",
"def",
"_handle_home",
"(",
"self",
",",
"request",
":",
"Request",
")",
"->",
"Response",
":",
"if",
"self",
".",
"description",
":",
"title",
"=",
"f'{self.name} - {self.description}'",
"else",
":",
"title",
"=",
"self",
".",
"name",
"text",
"=",
"dedent",
"(",
"f'''<!DOCTYPE html>\n <html>\n <head>\n <title>{title}</title>\n </head>\n <body>\n <h1>{title}</h1>\n <p>\n Metric are exported at the\n <a href=\"/metrics\">/metrics</a> endpoint.\n </p>\n </body>\n </html>\n '''",
")",
"return",
"Response",
"(",
"content_type",
"=",
"'text/html'",
",",
"text",
"=",
"text",
")"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
PrometheusExporter._handle_metrics
|
Handler for metrics.
|
prometheus_aioexporter/web.py
|
async def _handle_metrics(self, request: Request) -> Response:
"""Handler for metrics."""
if self._update_handler:
await self._update_handler(self.registry.get_metrics())
response = Response(body=self.registry.generate_metrics())
response.content_type = CONTENT_TYPE_LATEST
return response
|
async def _handle_metrics(self, request: Request) -> Response:
"""Handler for metrics."""
if self._update_handler:
await self._update_handler(self.registry.get_metrics())
response = Response(body=self.registry.generate_metrics())
response.content_type = CONTENT_TYPE_LATEST
return response
|
[
"Handler",
"for",
"metrics",
"."
] |
albertodonato/prometheus-aioexporter
|
python
|
https://github.com/albertodonato/prometheus-aioexporter/blob/e1b85544ce72bfaae9182597709a2ecede8c8242/prometheus_aioexporter/web.py#L108-L114
|
[
"async",
"def",
"_handle_metrics",
"(",
"self",
",",
"request",
":",
"Request",
")",
"->",
"Response",
":",
"if",
"self",
".",
"_update_handler",
":",
"await",
"self",
".",
"_update_handler",
"(",
"self",
".",
"registry",
".",
"get_metrics",
"(",
")",
")",
"response",
"=",
"Response",
"(",
"body",
"=",
"self",
".",
"registry",
".",
"generate_metrics",
"(",
")",
")",
"response",
".",
"content_type",
"=",
"CONTENT_TYPE_LATEST",
"return",
"response"
] |
e1b85544ce72bfaae9182597709a2ecede8c8242
|
test
|
wa
|
A free-text query resolver by Wolfram|Alpha. Returns the first
result, if available.
|
wolframalpha/pmxbot.py
|
def wa(client, event, channel, nick, rest):
"""
A free-text query resolver by Wolfram|Alpha. Returns the first
result, if available.
"""
client = wolframalpha.Client(pmxbot.config['Wolfram|Alpha API key'])
res = client.query(rest)
return next(res.results).text
|
def wa(client, event, channel, nick, rest):
"""
A free-text query resolver by Wolfram|Alpha. Returns the first
result, if available.
"""
client = wolframalpha.Client(pmxbot.config['Wolfram|Alpha API key'])
res = client.query(rest)
return next(res.results).text
|
[
"A",
"free",
"-",
"text",
"query",
"resolver",
"by",
"Wolfram|Alpha",
".",
"Returns",
"the",
"first",
"result",
"if",
"available",
"."
] |
jaraco/wolframalpha
|
python
|
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/pmxbot.py#L11-L18
|
[
"def",
"wa",
"(",
"client",
",",
"event",
",",
"channel",
",",
"nick",
",",
"rest",
")",
":",
"client",
"=",
"wolframalpha",
".",
"Client",
"(",
"pmxbot",
".",
"config",
"[",
"'Wolfram|Alpha API key'",
"]",
")",
"res",
"=",
"client",
".",
"query",
"(",
"rest",
")",
"return",
"next",
"(",
"res",
".",
"results",
")",
".",
"text"
] |
50bf2e047b698e308a9a88770a23e7e210aa5bcb
|
test
|
fix_HTTPMessage
|
Python 2 uses a deprecated method signature and doesn't provide
forward compatibility.
Add it.
|
wolframalpha/compat.py
|
def fix_HTTPMessage():
"""
    Python 2 uses a deprecated method signature and doesn't provide
forward compatibility.
Add it.
"""
if six.PY3:
return
http_client.HTTPMessage.get_content_type = http_client.HTTPMessage.gettype
http_client.HTTPMessage.get_param = http_client.HTTPMessage.getparam
|
def fix_HTTPMessage():
"""
    Python 2 uses a deprecated method signature and doesn't provide
forward compatibility.
Add it.
"""
if six.PY3:
return
http_client.HTTPMessage.get_content_type = http_client.HTTPMessage.gettype
http_client.HTTPMessage.get_param = http_client.HTTPMessage.getparam
|
[
"Python",
"2",
"uses",
"a",
"deprecated",
"method",
"signature",
"and",
"doesn",
"t",
"provide",
"the",
"forward",
"compatibility",
".",
"Add",
"it",
"."
] |
jaraco/wolframalpha
|
python
|
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/compat.py#L5-L15
|
[
"def",
"fix_HTTPMessage",
"(",
")",
":",
"if",
"six",
".",
"PY3",
":",
"return",
"http_client",
".",
"HTTPMessage",
".",
"get_content_type",
"=",
"http_client",
".",
"HTTPMessage",
".",
"gettype",
"http_client",
".",
"HTTPMessage",
".",
"get_param",
"=",
"http_client",
".",
"HTTPMessage",
".",
"getparam"
] |
50bf2e047b698e308a9a88770a23e7e210aa5bcb
|
test
|
Client.query
|
Query Wolfram|Alpha using the v2.0 API
Allows for arbitrary parameters to be passed in
the query. For example, to pass assumptions:
client.query(input='pi', assumption='*C.pi-_*NamedConstant-')
To pass multiple assumptions, pass multiple items
as params:
params = (
('assumption', '*C.pi-_*NamedConstant-'),
('assumption', 'DateOrder_**Day.Month.Year--'),
)
client.query(input='pi', params=params)
For more details on Assumptions, see
https://products.wolframalpha.com/api/documentation.html#6
|
wolframalpha/__init__.py
|
def query(self, input, params=(), **kwargs):
"""
Query Wolfram|Alpha using the v2.0 API
Allows for arbitrary parameters to be passed in
the query. For example, to pass assumptions:
client.query(input='pi', assumption='*C.pi-_*NamedConstant-')
To pass multiple assumptions, pass multiple items
as params:
params = (
('assumption', '*C.pi-_*NamedConstant-'),
('assumption', 'DateOrder_**Day.Month.Year--'),
)
client.query(input='pi', params=params)
For more details on Assumptions, see
https://products.wolframalpha.com/api/documentation.html#6
"""
data = dict(
input=input,
appid=self.app_id,
)
data = itertools.chain(params, data.items(), kwargs.items())
query = urllib.parse.urlencode(tuple(data))
url = 'https://api.wolframalpha.com/v2/query?' + query
resp = urllib.request.urlopen(url)
assert resp.headers.get_content_type() == 'text/xml'
assert resp.headers.get_param('charset') == 'utf-8'
return Result(resp)
|
def query(self, input, params=(), **kwargs):
"""
Query Wolfram|Alpha using the v2.0 API
Allows for arbitrary parameters to be passed in
the query. For example, to pass assumptions:
client.query(input='pi', assumption='*C.pi-_*NamedConstant-')
To pass multiple assumptions, pass multiple items
as params:
params = (
('assumption', '*C.pi-_*NamedConstant-'),
('assumption', 'DateOrder_**Day.Month.Year--'),
)
client.query(input='pi', params=params)
For more details on Assumptions, see
https://products.wolframalpha.com/api/documentation.html#6
"""
data = dict(
input=input,
appid=self.app_id,
)
data = itertools.chain(params, data.items(), kwargs.items())
query = urllib.parse.urlencode(tuple(data))
url = 'https://api.wolframalpha.com/v2/query?' + query
resp = urllib.request.urlopen(url)
assert resp.headers.get_content_type() == 'text/xml'
assert resp.headers.get_param('charset') == 'utf-8'
return Result(resp)
|
[
"Query",
"Wolfram|Alpha",
"using",
"the",
"v2",
".",
"0",
"API"
] |
jaraco/wolframalpha
|
python
|
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/__init__.py#L24-L56
|
[
"def",
"query",
"(",
"self",
",",
"input",
",",
"params",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"dict",
"(",
"input",
"=",
"input",
",",
"appid",
"=",
"self",
".",
"app_id",
",",
")",
"data",
"=",
"itertools",
".",
"chain",
"(",
"params",
",",
"data",
".",
"items",
"(",
")",
",",
"kwargs",
".",
"items",
"(",
")",
")",
"query",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"tuple",
"(",
"data",
")",
")",
"url",
"=",
"'https://api.wolframalpha.com/v2/query?'",
"+",
"query",
"resp",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"assert",
"resp",
".",
"headers",
".",
"get_content_type",
"(",
")",
"==",
"'text/xml'",
"assert",
"resp",
".",
"headers",
".",
"get_param",
"(",
"'charset'",
")",
"==",
"'utf-8'",
"return",
"Result",
"(",
"resp",
")"
] |
50bf2e047b698e308a9a88770a23e7e210aa5bcb
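A short usage sketch for the Client.query record above. The AppID string is a placeholder (Wolfram|Alpha requires a real AppID); the rest follows the docstring.

import wolframalpha

# Client takes the Wolfram|Alpha AppID; 'YOUR_APP_ID' is a placeholder.
client = wolframalpha.Client('YOUR_APP_ID')

# Simple query.
res = client.query('pi')

# Repeated parameters (e.g. multiple assumptions) go in via `params`,
# exactly as the docstring describes.
params = (
    ('assumption', '*C.pi-_*NamedConstant-'),
    ('assumption', 'DateOrder_**Day.Month.Year--'),
)
res = client.query(input='pi', params=params)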
|
test
|
Result.info
|
The pods, assumptions, and warnings of this result.
|
wolframalpha/__init__.py
|
def info(self):
"""
The pods, assumptions, and warnings of this result.
"""
return itertools.chain(self.pods, self.assumptions, self.warnings)
|
def info(self):
"""
The pods, assumptions, and warnings of this result.
"""
return itertools.chain(self.pods, self.assumptions, self.warnings)
|
[
"The",
"pods",
"assumptions",
"and",
"warnings",
"of",
"this",
"result",
"."
] |
jaraco/wolframalpha
|
python
|
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/__init__.py#L181-L185
|
[
"def",
"info",
"(",
"self",
")",
":",
"return",
"itertools",
".",
"chain",
"(",
"self",
".",
"pods",
",",
"self",
".",
"assumptions",
",",
"self",
".",
"warnings",
")"
] |
50bf2e047b698e308a9a88770a23e7e210aa5bcb
|
test
|
Result.results
|
The pods that hold the response to a simple, discrete query.
|
wolframalpha/__init__.py
|
def results(self):
"""
The pods that hold the response to a simple, discrete query.
"""
return (
pod
for pod in self.pods
if pod.primary
or pod.title == 'Result'
)
|
def results(self):
"""
The pods that hold the response to a simple, discrete query.
"""
return (
pod
for pod in self.pods
if pod.primary
or pod.title == 'Result'
)
|
[
"The",
"pods",
"that",
"hold",
"the",
"response",
"to",
"a",
"simple",
"discrete",
"query",
"."
] |
jaraco/wolframalpha
|
python
|
https://github.com/jaraco/wolframalpha/blob/50bf2e047b698e308a9a88770a23e7e210aa5bcb/wolframalpha/__init__.py#L206-L215
|
[
"def",
"results",
"(",
"self",
")",
":",
"return",
"(",
"pod",
"for",
"pod",
"in",
"self",
".",
"pods",
"if",
"pod",
".",
"primary",
"or",
"pod",
".",
"title",
"==",
"'Result'",
")"
] |
50bf2e047b698e308a9a88770a23e7e210aa5bcb
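A sketch of how the two Result accessors above are typically consumed, assuming `info` and `results` are exposed as properties (as in this library) and that the query succeeded.

import wolframalpha

client = wolframalpha.Client('YOUR_APP_ID')  # placeholder AppID
res = client.query('pi')

# `results` yields only primary/'Result' pods; take the first, if any.
answer = next(res.results, None)
if answer is not None:
    print(answer.text)

# `info` chains pods, assumptions, and warnings into one iterable.
for item in res.info:
    print(item)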
|
test
|
ApiClient.encode
|
Add request content data to request body, set Content-type header.
Should be overridden by subclasses if not using JSON encoding.
Args:
request (HTTPRequest): The request object.
data (dict, None): Data to be encoded.
Returns:
HTTPRequest: The request object.
|
nerd/client.py
|
def encode(request, data):
""" Add request content data to request body, set Content-type header.
Should be overridden by subclasses if not using JSON encoding.
Args:
request (HTTPRequest): The request object.
data (dict, None): Data to be encoded.
Returns:
HTTPRequest: The request object.
"""
if data is None:
return request
request.add_header('Content-Type', 'application/json')
request.data = json.dumps(data)
return request
|
def encode(request, data):
""" Add request content data to request body, set Content-type header.
Should be overridden by subclasses if not using JSON encoding.
Args:
request (HTTPRequest): The request object.
data (dict, None): Data to be encoded.
Returns:
HTTPRequest: The request object.
"""
if data is None:
return request
request.add_header('Content-Type', 'application/json')
request.data = json.dumps(data)
return request
|
[
"Add",
"request",
"content",
"data",
"to",
"request",
"body",
"set",
"Content",
"-",
"type",
"header",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/client.py#L45-L63
|
[
"def",
"encode",
"(",
"request",
",",
"data",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"request",
"request",
".",
"add_header",
"(",
"'Content-Type'",
",",
"'application/json'",
")",
"request",
".",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"return",
"request"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
ApiClient.call_api
|
Call API.
This returns an object containing data, with error details if applicable.
Args:
method (str): The HTTP method to use.
url (str): Resource location relative to the base URL.
headers (dict or None): Extra request headers to set.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents for POST or PUT requests.
files (dict or None): Files to be passed to the request.
timeout (int): Maximum time before timing out.
Returns:
ResultParser or ErrorParser.
|
nerd/client.py
|
def call_api(
self,
method,
url,
headers=None,
params=None,
data=None,
files=None,
timeout=None,
):
""" Call API.
    This returns an object containing data, with error details if applicable.
Args:
method (str): The HTTP method to use.
url (str): Resource location relative to the base URL.
headers (dict or None): Extra request headers to set.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents for POST or PUT requests.
        files (dict or None): Files to be passed to the request.
timeout (int): Maximum time before timing out.
Returns:
ResultParser or ErrorParser.
"""
method = method.upper()
headers = deepcopy(headers) or {}
headers['Accept'] = self.accept_type
params = deepcopy(params) or {}
data = data or {}
files = files or {}
if self.username and self.api_key:
params.update(self.get_credentials())
url = urljoin(self.base_url, url)
r = requests.request(
method,
url,
headers=headers,
params=params,
files=files,
data=data,
timeout=timeout,
)
return r, r.status_code
|
def call_api(
self,
method,
url,
headers=None,
params=None,
data=None,
files=None,
timeout=None,
):
""" Call API.
    This returns an object containing data, with error details if applicable.
Args:
method (str): The HTTP method to use.
url (str): Resource location relative to the base URL.
headers (dict or None): Extra request headers to set.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents for POST or PUT requests.
        files (dict or None): Files to be passed to the request.
timeout (int): Maximum time before timing out.
Returns:
ResultParser or ErrorParser.
"""
method = method.upper()
headers = deepcopy(headers) or {}
headers['Accept'] = self.accept_type
params = deepcopy(params) or {}
data = data or {}
files = files or {}
if self.username and self.api_key:
params.update(self.get_credentials())
url = urljoin(self.base_url, url)
r = requests.request(
method,
url,
headers=headers,
params=params,
files=files,
data=data,
timeout=timeout,
)
return r, r.status_code
|
[
"Call",
"API",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/client.py#L93-L141
|
[
"def",
"call_api",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
"=",
"None",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"files",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
")",
":",
"method",
"=",
"method",
".",
"upper",
"(",
")",
"headers",
"=",
"deepcopy",
"(",
"headers",
")",
"or",
"{",
"}",
"headers",
"[",
"'Accept'",
"]",
"=",
"self",
".",
"accept_type",
"params",
"=",
"deepcopy",
"(",
"params",
")",
"or",
"{",
"}",
"data",
"=",
"data",
"or",
"{",
"}",
"files",
"=",
"files",
"or",
"{",
"}",
"if",
"self",
".",
"username",
"and",
"self",
".",
"api_key",
":",
"params",
".",
"update",
"(",
"self",
".",
"get_credentials",
"(",
")",
")",
"url",
"=",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"url",
")",
"r",
"=",
"requests",
".",
"request",
"(",
"method",
",",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"files",
"=",
"files",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"timeout",
",",
")",
"return",
"r",
",",
"r",
".",
"status_code"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
ApiClient.get
|
Call the API with a GET request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
Returns:
ResultParser or ErrorParser.
|
nerd/client.py
|
def get(self, url, params=None, **kwargs):
""" Call the API with a GET request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
Returns:
ResultParser or ErrorParser.
"""
return self.call_api(
"GET",
url,
params=params,
**kwargs
)
|
def get(self, url, params=None, **kwargs):
""" Call the API with a GET request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
Returns:
ResultParser or ErrorParser.
"""
return self.call_api(
"GET",
url,
params=params,
**kwargs
)
|
[
"Call",
"the",
"API",
"with",
"a",
"GET",
"request",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/client.py#L143-L158
|
[
"def",
"get",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"call_api",
"(",
"\"GET\"",
",",
"url",
",",
"params",
"=",
"params",
",",
"*",
"*",
"kwargs",
")"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
ApiClient.delete
|
Call the API with a DELETE request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
Returns:
ResultParser or ErrorParser.
|
nerd/client.py
|
def delete(self, url, params=None, **kwargs):
""" Call the API with a DELETE request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
Returns:
ResultParser or ErrorParser.
"""
return self.call_api(
"DELETE",
url,
params=params,
**kwargs
)
|
def delete(self, url, params=None, **kwargs):
""" Call the API with a DELETE request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
Returns:
ResultParser or ErrorParser.
"""
return self.call_api(
"DELETE",
url,
params=params,
**kwargs
)
|
[
"Call",
"the",
"API",
"with",
"a",
"DELETE",
"request",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/client.py#L160-L175
|
[
"def",
"delete",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"call_api",
"(",
"\"DELETE\"",
",",
"url",
",",
"params",
"=",
"params",
",",
"*",
"*",
"kwargs",
")"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
ApiClient.put
|
Call the API with a PUT request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents.
files (dict or None): Files to be passed to the request.
Returns:
An instance of ResultParser or ErrorParser.
|
nerd/client.py
|
def put(self, url, params=None, data=None, files=None, **kwargs):
""" Call the API with a PUT request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.
Returns:
An instance of ResultParser or ErrorParser.
"""
return self.call_api(
"PUT",
url,
params=params,
data=data,
files=files,
**kwargs
)
|
def put(self, url, params=None, data=None, files=None, **kwargs):
""" Call the API with a PUT request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.
Returns:
An instance of ResultParser or ErrorParser.
"""
return self.call_api(
"PUT",
url,
params=params,
data=data,
files=files,
**kwargs
)
|
[
"Call",
"the",
"API",
"with",
"a",
"PUT",
"request",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/client.py#L177-L196
|
[
"def",
"put",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"files",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"call_api",
"(",
"\"PUT\"",
",",
"url",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"files",
"=",
"files",
",",
"*",
"*",
"kwargs",
")"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
ApiClient.post
|
Call the API with a POST request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents.
files (dict or None): Files to be passed to the request.
Returns:
An instance of ResultParser or ErrorParser.
|
nerd/client.py
|
def post(self, url, params=None, data=None, files=None, **kwargs):
""" Call the API with a POST request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.
Returns:
An instance of ResultParser or ErrorParser.
"""
return self.call_api(
"POST",
url,
params=params,
data=data,
files=files,
**kwargs
)
|
def post(self, url, params=None, data=None, files=None, **kwargs):
""" Call the API with a POST request.
Args:
url (str): Resource location relative to the base URL.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.
Returns:
An instance of ResultParser or ErrorParser.
"""
return self.call_api(
"POST",
url,
params=params,
data=data,
files=files,
**kwargs
)
|
[
"Call",
"the",
"API",
"with",
"a",
"POST",
"request",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/client.py#L198-L217
|
[
"def",
"post",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"files",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"call_api",
"(",
"\"POST\"",
",",
"url",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"files",
"=",
"files",
",",
"*",
"*",
"kwargs",
")"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
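To show how the verb helpers above all funnel into call_api, a minimal sketch; the constructor arguments and endpoint paths are assumptions, not part of the records.

# Hypothetical setup; base_url/credentials depend on the ApiClient subclass.
client = ApiClient(base_url='https://nerd.example.org/service/')

# Each helper forwards its keyword arguments to call_api with a fixed verb;
# call_api returns the raw requests.Response plus its status code.
res, status = client.get('kb/concept/Q90', params={'lang': 'en'})
res, status = client.post('disambiguate', files={'query': '{"text": "..."}'})
if status == 200:
    print(res.json())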
|
test
|
NerdClient._process_query
|
Process query recursively; if the text is too long,
it is split and processed bit by bit.
Args:
    query (dict): Text to be processed.
    prepared (bool): True when the query is ready to be submitted via
    POST request.
Returns:
    dict, int: The processed query and the API status code.
|
nerd/nerd_client.py
|
def _process_query(self, query, prepared=False):
""" Process query recursively, if the text is too long,
it is split and processed bit a bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API.
"""
# Exit condition and POST
if prepared is True:
files = {'query': str(query)}
logger.debug('About to submit the following query {}'.format(query))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
text = query['text']
sentence_coordinates = [
{
"offsetStart": 0,
"offsetEnd": len(text)
}
]
total_nb_sentences = len(sentence_coordinates) # Sentences from text.
sentences_groups = []
if len(text) > self.max_text_length:
res, status_code = self.segment(text)
if status_code == 200:
sentence_coordinates = res['sentences']
total_nb_sentences = len(sentence_coordinates)
else:
logger.error('Error during the segmentation of the text.')
logger.debug(
'Text too long, split in {} sentences; building groups of {} '
'sentences.'.format(
total_nb_sentences, self.sentences_per_group
)
)
sentences_groups = self._group_sentences(
total_nb_sentences,
self.sentences_per_group
)
else:
query['sentence'] = "true"
if total_nb_sentences > 1:
query['sentences'] = sentence_coordinates
if len(sentences_groups) > 0:
for group in sentences_groups:
query['processSentence'] = group
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
if 'entities' in res:
query['entities'] = res[u'entities']
query['language'] = res[u'language']
else:
logger.error(
"Error when processing the query {}".format(query)
)
return None, status_code
else:
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
query['language'] = res[u'language']
if 'entities' in res:
query['entities'] = res[u'entities']
else:
logger.error("Error when processing the query {}".format(query))
return None, status_code
return query, status_code
|
def _process_query(self, query, prepared=False):
""" Process query recursively, if the text is too long,
it is split and processed bit a bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API.
"""
# Exit condition and POST
if prepared is True:
files = {'query': str(query)}
logger.debug('About to submit the following query {}'.format(query))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
text = query['text']
sentence_coordinates = [
{
"offsetStart": 0,
"offsetEnd": len(text)
}
]
total_nb_sentences = len(sentence_coordinates) # Sentences from text.
sentences_groups = []
if len(text) > self.max_text_length:
res, status_code = self.segment(text)
if status_code == 200:
sentence_coordinates = res['sentences']
total_nb_sentences = len(sentence_coordinates)
else:
logger.error('Error during the segmentation of the text.')
logger.debug(
'Text too long, split in {} sentences; building groups of {} '
'sentences.'.format(
total_nb_sentences, self.sentences_per_group
)
)
sentences_groups = self._group_sentences(
total_nb_sentences,
self.sentences_per_group
)
else:
query['sentence'] = "true"
if total_nb_sentences > 1:
query['sentences'] = sentence_coordinates
if len(sentences_groups) > 0:
for group in sentences_groups:
query['processSentence'] = group
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
if 'entities' in res:
query['entities'] = res[u'entities']
query['language'] = res[u'language']
else:
logger.error(
"Error when processing the query {}".format(query)
)
return None, status_code
else:
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
query['language'] = res[u'language']
if 'entities' in res:
query['entities'] = res[u'entities']
else:
logger.error("Error when processing the query {}".format(query))
return None, status_code
return query, status_code
|
[
"Process",
"query",
"recursively",
"if",
"the",
"text",
"is",
"too",
"long",
"it",
"is",
"split",
"and",
"processed",
"bit",
"a",
"bit",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L39-L133
|
[
"def",
"_process_query",
"(",
"self",
",",
"query",
",",
"prepared",
"=",
"False",
")",
":",
"# Exit condition and POST",
"if",
"prepared",
"is",
"True",
":",
"files",
"=",
"{",
"'query'",
":",
"str",
"(",
"query",
")",
"}",
"logger",
".",
"debug",
"(",
"'About to submit the following query {}'",
".",
"format",
"(",
"query",
")",
")",
"res",
",",
"status",
"=",
"self",
".",
"post",
"(",
"self",
".",
"disambiguate_service",
",",
"files",
"=",
"files",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/json'",
"}",
",",
")",
"if",
"status",
"==",
"200",
":",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status",
"else",
":",
"logger",
".",
"debug",
"(",
"'Disambiguation failed.'",
")",
"return",
"None",
",",
"status",
"text",
"=",
"query",
"[",
"'text'",
"]",
"sentence_coordinates",
"=",
"[",
"{",
"\"offsetStart\"",
":",
"0",
",",
"\"offsetEnd\"",
":",
"len",
"(",
"text",
")",
"}",
"]",
"total_nb_sentences",
"=",
"len",
"(",
"sentence_coordinates",
")",
"# Sentences from text.",
"sentences_groups",
"=",
"[",
"]",
"if",
"len",
"(",
"text",
")",
">",
"self",
".",
"max_text_length",
":",
"res",
",",
"status_code",
"=",
"self",
".",
"segment",
"(",
"text",
")",
"if",
"status_code",
"==",
"200",
":",
"sentence_coordinates",
"=",
"res",
"[",
"'sentences'",
"]",
"total_nb_sentences",
"=",
"len",
"(",
"sentence_coordinates",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Error during the segmentation of the text.'",
")",
"logger",
".",
"debug",
"(",
"'Text too long, split in {} sentences; building groups of {} '",
"'sentences.'",
".",
"format",
"(",
"total_nb_sentences",
",",
"self",
".",
"sentences_per_group",
")",
")",
"sentences_groups",
"=",
"self",
".",
"_group_sentences",
"(",
"total_nb_sentences",
",",
"self",
".",
"sentences_per_group",
")",
"else",
":",
"query",
"[",
"'sentence'",
"]",
"=",
"\"true\"",
"if",
"total_nb_sentences",
">",
"1",
":",
"query",
"[",
"'sentences'",
"]",
"=",
"sentence_coordinates",
"if",
"len",
"(",
"sentences_groups",
")",
">",
"0",
":",
"for",
"group",
"in",
"sentences_groups",
":",
"query",
"[",
"'processSentence'",
"]",
"=",
"group",
"res",
",",
"status_code",
"=",
"self",
".",
"_process_query",
"(",
"query",
",",
"prepared",
"=",
"True",
")",
"if",
"status_code",
"==",
"200",
":",
"if",
"'entities'",
"in",
"res",
":",
"query",
"[",
"'entities'",
"]",
"=",
"res",
"[",
"u'entities'",
"]",
"query",
"[",
"'language'",
"]",
"=",
"res",
"[",
"u'language'",
"]",
"else",
":",
"logger",
".",
"error",
"(",
"\"Error when processing the query {}\"",
".",
"format",
"(",
"query",
")",
")",
"return",
"None",
",",
"status_code",
"else",
":",
"res",
",",
"status_code",
"=",
"self",
".",
"_process_query",
"(",
"query",
",",
"prepared",
"=",
"True",
")",
"if",
"status_code",
"==",
"200",
":",
"query",
"[",
"'language'",
"]",
"=",
"res",
"[",
"u'language'",
"]",
"if",
"'entities'",
"in",
"res",
":",
"query",
"[",
"'entities'",
"]",
"=",
"res",
"[",
"u'entities'",
"]",
"else",
":",
"logger",
".",
"error",
"(",
"\"Error when processing the query {}\"",
".",
"format",
"(",
"query",
")",
")",
"return",
"None",
",",
"status_code",
"return",
"query",
",",
"status_code"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
NerdClient._group_sentences
|
Split sentences in groups, given a specific group length.
Args:
total_nb_sentences (int): Total available sentences.
group_length (int): Limit of length for each group.
Returns:
list: Contains groups (lists) of sentences.
|
nerd/nerd_client.py
|
def _group_sentences(total_nb_sentences, group_length):
""" Split sentences in groups, given a specific group length.
Args:
total_nb_sentences (int): Total available sentences.
group_length (int): Limit of length for each group.
Returns:
list: Contains groups (lists) of sentences.
"""
sentences_groups = []
current_sentence_group = []
for i in range(0, total_nb_sentences):
if i % group_length == 0:
if len(current_sentence_group) > 0:
sentences_groups.append(current_sentence_group)
current_sentence_group = [i]
else:
current_sentence_group.append(i)
if len(current_sentence_group) > 0:
sentences_groups.append(current_sentence_group)
return sentences_groups
|
def _group_sentences(total_nb_sentences, group_length):
""" Split sentences in groups, given a specific group length.
Args:
total_nb_sentences (int): Total available sentences.
group_length (int): Limit of length for each group.
Returns:
list: Contains groups (lists) of sentences.
"""
sentences_groups = []
current_sentence_group = []
for i in range(0, total_nb_sentences):
if i % group_length == 0:
if len(current_sentence_group) > 0:
sentences_groups.append(current_sentence_group)
current_sentence_group = [i]
else:
current_sentence_group.append(i)
if len(current_sentence_group) > 0:
sentences_groups.append(current_sentence_group)
return sentences_groups
|
[
"Split",
"sentences",
"in",
"groups",
"given",
"a",
"specific",
"group",
"length",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L136-L160
|
[
"def",
"_group_sentences",
"(",
"total_nb_sentences",
",",
"group_length",
")",
":",
"sentences_groups",
"=",
"[",
"]",
"current_sentence_group",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"total_nb_sentences",
")",
":",
"if",
"i",
"%",
"group_length",
"==",
"0",
":",
"if",
"len",
"(",
"current_sentence_group",
")",
">",
"0",
":",
"sentences_groups",
".",
"append",
"(",
"current_sentence_group",
")",
"current_sentence_group",
"=",
"[",
"i",
"]",
"else",
":",
"current_sentence_group",
".",
"append",
"(",
"i",
")",
"if",
"len",
"(",
"current_sentence_group",
")",
">",
"0",
":",
"sentences_groups",
".",
"append",
"(",
"current_sentence_group",
")",
"return",
"sentences_groups"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
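A worked example of the grouping rule above: indices 0..total_nb_sentences-1 are batched into runs of group_length, with a shorter final group when the division is not exact. The function is defined without `self`, so presumably a staticmethod on NerdClient.

from nerd.nerd_client import NerdClient

# Trace for 7 sentences in groups of 3:
# i=0 starts [0]; i=1,2 extend it; i=3 flushes [0, 1, 2] and starts [3];
# i=4,5 extend; i=6 flushes [3, 4, 5]; the trailing [6] is appended at the end.
groups = NerdClient._group_sentences(total_nb_sentences=7, group_length=3)
print(groups)  # [[0, 1, 2], [3, 4, 5], [6]]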
|
test
|
NerdClient.disambiguate_pdf
|
Call the disambiguation service in order to process a pdf file.
Args:
    file (str): Path to the PDF file to be disambiguated.
language (str): language of text (if known)
Returns:
dict, int: API response and API status.
|
nerd/nerd_client.py
|
def disambiguate_pdf(self, file, language=None, entities=None):
""" Call the disambiguation service in order to process a pdf file .
Args:
pdf (file): PDF file to be disambiguated.
language (str): language of text (if known)
Returns:
dict, int: API response and API status.
"""
body = {
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {
'query': str(body),
'file': (
file,
open(file, 'rb'),
'application/pdf',
{'Expires': '0'}
)
}
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status != 200:
logger.debug('Disambiguation failed with error ' + str(status))
return self.decode(res), status
|
def disambiguate_pdf(self, file, language=None, entities=None):
""" Call the disambiguation service in order to process a pdf file .
Args:
pdf (file): PDF file to be disambiguated.
language (str): language of text (if known)
Returns:
dict, int: API response and API status.
"""
body = {
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {
'query': str(body),
'file': (
file,
open(file, 'rb'),
'application/pdf',
{'Expires': '0'}
)
}
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status != 200:
logger.debug('Disambiguation failed with error ' + str(status))
return self.decode(res), status
|
[
"Call",
"the",
"disambiguation",
"service",
"in",
"order",
"to",
"process",
"a",
"pdf",
"file",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L162-L202
|
[
"def",
"disambiguate_pdf",
"(",
"self",
",",
"file",
",",
"language",
"=",
"None",
",",
"entities",
"=",
"None",
")",
":",
"body",
"=",
"{",
"\"customisation\"",
":",
"\"generic\"",
"}",
"if",
"language",
":",
"body",
"[",
"'language'",
"]",
"=",
"{",
"\"lang\"",
":",
"language",
"}",
"if",
"entities",
":",
"body",
"[",
"'entities'",
"]",
"=",
"entities",
"files",
"=",
"{",
"'query'",
":",
"str",
"(",
"body",
")",
",",
"'file'",
":",
"(",
"file",
",",
"open",
"(",
"file",
",",
"'rb'",
")",
",",
"'application/pdf'",
",",
"{",
"'Expires'",
":",
"'0'",
"}",
")",
"}",
"res",
",",
"status",
"=",
"self",
".",
"post",
"(",
"self",
".",
"disambiguate_service",
",",
"files",
"=",
"files",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/json'",
"}",
",",
")",
"if",
"status",
"!=",
"200",
":",
"logger",
".",
"debug",
"(",
"'Disambiguation failed with error '",
"+",
"str",
"(",
"status",
")",
")",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
NerdClient.disambiguate_terms
|
Call the disambiguation service in order to get meanings.
Args:
terms (list): list of {term, weight} objects
language (str): language of text, english if not specified
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
|
nerd/nerd_client.py
|
def disambiguate_terms(self, terms, language="en", entities=None):
""" Call the disambiguation service in order to get meanings.
Args:
        terms (list): list of {term, weight} objects
language (str): language of text, english if not specified
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
"""
body = {
"termVector": terms,
"entities": [],
"onlyNER": "false",
"customisation": "generic"
}
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {'query': str(body)}
logger.debug('About to submit the following query {}'.format(body))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
|
def disambiguate_terms(self, terms, language="en", entities=None):
""" Call the disambiguation service in order to get meanings.
Args:
        terms (list): list of {term, weight} objects
language (str): language of text, english if not specified
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
"""
body = {
"termVector": terms,
"entities": [],
"onlyNER": "false",
"customisation": "generic"
}
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {'query': str(body)}
logger.debug('About to submit the following query {}'.format(body))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
|
[
"Call",
"the",
"disambiguation",
"service",
"in",
"order",
"to",
"get",
"meanings",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L204-L243
|
[
"def",
"disambiguate_terms",
"(",
"self",
",",
"terms",
",",
"language",
"=",
"\"en\"",
",",
"entities",
"=",
"None",
")",
":",
"body",
"=",
"{",
"\"termVector\"",
":",
"terms",
",",
"\"entities\"",
":",
"[",
"]",
",",
"\"onlyNER\"",
":",
"\"false\"",
",",
"\"customisation\"",
":",
"\"generic\"",
"}",
"body",
"[",
"'language'",
"]",
"=",
"{",
"\"lang\"",
":",
"language",
"}",
"if",
"entities",
":",
"body",
"[",
"'entities'",
"]",
"=",
"entities",
"files",
"=",
"{",
"'query'",
":",
"str",
"(",
"body",
")",
"}",
"logger",
".",
"debug",
"(",
"'About to submit the following query {}'",
".",
"format",
"(",
"body",
")",
")",
"res",
",",
"status",
"=",
"self",
".",
"post",
"(",
"self",
".",
"disambiguate_service",
",",
"files",
"=",
"files",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/json'",
"}",
",",
")",
"if",
"status",
"==",
"200",
":",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status",
"else",
":",
"logger",
".",
"debug",
"(",
"'Disambiguation failed.'",
")",
"return",
"None",
",",
"status"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
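A sketch of calling disambiguate_terms with a small term vector. The {term, weight} field names follow the docstring's description, and the response shape is an assumption.

from nerd.nerd_client import NerdClient

client = NerdClient()
terms = [
    {"term": "computer science", "weight": 0.3},
    {"term": "artificial intelligence", "weight": 0.7},
]
result, status = client.disambiguate_terms(terms, language="en")
if status == 200:
    print(result.get('entities', []))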
|
test
|
NerdClient.disambiguate_text
|
Call the disambiguation service in order to get meanings.
Args:
text (str): Text to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
|
nerd/nerd_client.py
|
def disambiguate_text(self, text, language=None, entities=None):
""" Call the disambiguation service in order to get meanings.
Args:
text (str): Text to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
"""
body = {
"text": text,
"entities": [],
"onlyNER": "false",
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
result, status_code = self._process_query(body)
if status_code != 200:
logger.debug('Disambiguation failed.')
return result, status_code
|
def disambiguate_text(self, text, language=None, entities=None):
""" Call the disambiguation service in order to get meanings.
Args:
text (str): Text to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
"""
body = {
"text": text,
"entities": [],
"onlyNER": "false",
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
result, status_code = self._process_query(body)
if status_code != 200:
logger.debug('Disambiguation failed.')
return result, status_code
|
[
"Call",
"the",
"disambiguation",
"service",
"in",
"order",
"to",
"get",
"meanings",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L245-L276
|
[
"def",
"disambiguate_text",
"(",
"self",
",",
"text",
",",
"language",
"=",
"None",
",",
"entities",
"=",
"None",
")",
":",
"body",
"=",
"{",
"\"text\"",
":",
"text",
",",
"\"entities\"",
":",
"[",
"]",
",",
"\"onlyNER\"",
":",
"\"false\"",
",",
"\"customisation\"",
":",
"\"generic\"",
"}",
"if",
"language",
":",
"body",
"[",
"'language'",
"]",
"=",
"{",
"\"lang\"",
":",
"language",
"}",
"if",
"entities",
":",
"body",
"[",
"'entities'",
"]",
"=",
"entities",
"result",
",",
"status_code",
"=",
"self",
".",
"_process_query",
"(",
"body",
")",
"if",
"status_code",
"!=",
"200",
":",
"logger",
".",
"debug",
"(",
"'Disambiguation failed.'",
")",
"return",
"result",
",",
"status_code"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
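A usage sketch for disambiguate_text; the import path follows the nerd/nerd_client.py location shown in the records, and the entity field names ('rawName', 'wikidataId') are assumptions about the entity-fishing response shape.

from nerd.nerd_client import NerdClient

client = NerdClient()
result, status = client.disambiguate_text(
    "Linux is a family of free and open-source operating systems.",
    language="en",
)
if status == 200:
    for entity in result.get('entities', []):
        print(entity.get('rawName'), entity.get('wikidataId'))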
|
test
|
NerdClient.disambiguate_query
|
Call the disambiguation service in order to disambiguate a search query.
Args:
query (str): Query to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
|
nerd/nerd_client.py
|
def disambiguate_query(self, query, language=None, entities=None):
""" Call the disambiguation service in order to disambiguate a search query.
Args:
        query (str): Query to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
"""
body = {
"shortText": query,
"entities": [],
"onlyNER": "false",
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {'query': str(body)}
logger.debug('About to submit the following query {}'.format(body))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
|
def disambiguate_query(self, query, language=None, entities=None):
""" Call the disambiguation service in order to disambiguate a search query.
Args:
        query (str): Query to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status.
"""
body = {
"shortText": query,
"entities": [],
"onlyNER": "false",
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {'query': str(body)}
logger.debug('About to submit the following query {}'.format(body))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
|
[
"Call",
"the",
"disambiguation",
"service",
"in",
"order",
"to",
"disambiguate",
"a",
"search",
"query",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L278-L318
|
[
"def",
"disambiguate_query",
"(",
"self",
",",
"query",
",",
"language",
"=",
"None",
",",
"entities",
"=",
"None",
")",
":",
"body",
"=",
"{",
"\"shortText\"",
":",
"query",
",",
"\"entities\"",
":",
"[",
"]",
",",
"\"onlyNER\"",
":",
"\"false\"",
",",
"\"customisation\"",
":",
"\"generic\"",
"}",
"if",
"language",
":",
"body",
"[",
"'language'",
"]",
"=",
"{",
"\"lang\"",
":",
"language",
"}",
"if",
"entities",
":",
"body",
"[",
"'entities'",
"]",
"=",
"entities",
"files",
"=",
"{",
"'query'",
":",
"str",
"(",
"body",
")",
"}",
"logger",
".",
"debug",
"(",
"'About to submit the following query {}'",
".",
"format",
"(",
"body",
")",
")",
"res",
",",
"status",
"=",
"self",
".",
"post",
"(",
"self",
".",
"disambiguate_service",
",",
"files",
"=",
"files",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/json'",
"}",
",",
")",
"if",
"status",
"==",
"200",
":",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status",
"else",
":",
"logger",
".",
"debug",
"(",
"'Disambiguation failed.'",
")",
"return",
"None",
",",
"status"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
NerdClient.segment
|
Call the segmenter in order to split text in sentences.
Args:
text (str): Text to be segmented.
Returns:
dict, int: A dict containing a list of dicts with the offsets of
each sentence; an integer representing the response code.
|
nerd/nerd_client.py
|
def segment(self, text):
""" Call the segmenter in order to split text in sentences.
Args:
text (str): Text to be segmented.
Returns:
dict, int: A dict containing a list of dicts with the offsets of
each sentence; an integer representing the response code.
"""
files = {'text': text}
res, status_code = self.post(self.segmentation_service, files=files)
if status_code != 200:
logger.debug('Segmentation failed.')
return self.decode(res), status_code
|
def segment(self, text):
""" Call the segmenter in order to split text in sentences.
Args:
text (str): Text to be segmented.
Returns:
dict, int: A dict containing a list of dicts with the offsets of
each sentence; an integer representing the response code.
"""
files = {'text': text}
res, status_code = self.post(self.segmentation_service, files=files)
if status_code != 200:
logger.debug('Segmentation failed.')
return self.decode(res), status_code
|
[
"Call",
"the",
"segmenter",
"in",
"order",
"to",
"split",
"text",
"in",
"sentences",
"."
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L320-L337
|
[
"def",
"segment",
"(",
"self",
",",
"text",
")",
":",
"files",
"=",
"{",
"'text'",
":",
"text",
"}",
"res",
",",
"status_code",
"=",
"self",
".",
"post",
"(",
"self",
".",
"segmentation_service",
",",
"files",
"=",
"files",
")",
"if",
"status_code",
"!=",
"200",
":",
"logger",
".",
"debug",
"(",
"'Segmentation failed.'",
")",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status_code"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
|
test
|
NerdClient.get_language
|
Recognise the language of the input text
Args:
    text (str): The text whose language needs to be recognised
Returns:
dict, int: A dict containing the recognised language and the
confidence score.
|
nerd/nerd_client.py
|
def get_language(self, text):
""" Recognise the language of the text in input
Args:
id (str): The text whose the language needs to be recognised
Returns:
dict, int: A dict containing the recognised language and the
confidence score.
"""
files = {'text': text}
res, status_code = self.post(self.language_service, files=files)
if status_code != 200:
logger.debug('Language recognition failed.')
return self.decode(res), status_code
|
def get_language(self, text):
""" Recognise the language of the text in input
Args:
id (str): The text whose the language needs to be recognised
Returns:
dict, int: A dict containing the recognised language and the
confidence score.
"""
files = {'text': text}
res, status_code = self.post(self.language_service, files=files)
if status_code != 200:
logger.debug('Language recognition failed.')
return self.decode(res), status_code
|
[
"Recognise",
"the",
"language",
"of",
"the",
"text",
"in",
"input"
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L339-L355
|
[
"def",
"get_language",
"(",
"self",
",",
"text",
")",
":",
"files",
"=",
"{",
"'text'",
":",
"text",
"}",
"res",
",",
"status_code",
"=",
"self",
".",
"post",
"(",
"self",
".",
"language_service",
",",
"files",
"=",
"files",
")",
"if",
"status_code",
"!=",
"200",
":",
"logger",
".",
"debug",
"(",
"'Language recognition failed.'",
")",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status_code"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
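The two plain-text services above compose naturally; a sketch, with the response shapes treated as assumptions.

from nerd.nerd_client import NerdClient

client = NerdClient()

# Language identification returns the recognised language and a confidence.
lang, status = client.get_language("Cette phrase est en français.")
if status == 200:
    print(lang)

# Segmentation returns sentence offsets, as consumed by _process_query above.
res, status = client.segment("First sentence. Second sentence.")
if status == 200:
    print(res['sentences'])  # e.g. [{'offsetStart': 0, 'offsetEnd': 15}, ...]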
|
test
|
NerdClient.get_concept
|
Fetch the concept from the Knowledge base
Args:
conceptId (str): The concept id to be fetched; it can be a Wikipedia
page id or a Wikidata id.
Returns:
dict, int: A dict containing the concept information; an integer
representing the response code.
|
nerd/nerd_client.py
|
def get_concept(self, conceptId, lang='en'):
""" Fetch the concept from the Knowledge base
Args:
        conceptId (str): The concept id to be fetched; it can be a Wikipedia
        page id or a Wikidata id.
Returns:
dict, int: A dict containing the concept information; an integer
representing the response code.
"""
url = urljoin(self.concept_service + '/', conceptId)
res, status_code = self.get(url, params={'lang': lang})
if status_code != 200:
logger.debug('Fetch concept failed.')
return self.decode(res), status_code
|
def get_concept(self, conceptId, lang='en'):
""" Fetch the concept from the Knowledge base
Args:
        conceptId (str): The concept id to be fetched; it can be a Wikipedia
        page id or a Wikidata id.
Returns:
dict, int: A dict containing the concept information; an integer
representing the response code.
"""
url = urljoin(self.concept_service + '/', conceptId)
res, status_code = self.get(url, params={'lang': lang})
if status_code != 200:
logger.debug('Fetch concept failed.')
return self.decode(res), status_code
|
[
"Fetch",
"the",
"concept",
"from",
"the",
"Knowledge",
"base"
] |
hirmeos/entity-fishing-client-python
|
python
|
https://github.com/hirmeos/entity-fishing-client-python/blob/cd5c6e10c6c4e653669e11d735d5773766986bda/nerd/nerd_client.py#L357-L375
|
[
"def",
"get_concept",
"(",
"self",
",",
"conceptId",
",",
"lang",
"=",
"'en'",
")",
":",
"url",
"=",
"urljoin",
"(",
"self",
".",
"concept_service",
"+",
"'/'",
",",
"conceptId",
")",
"res",
",",
"status_code",
"=",
"self",
".",
"get",
"(",
"url",
",",
"params",
"=",
"{",
"'lang'",
":",
"lang",
"}",
")",
"if",
"status_code",
"!=",
"200",
":",
"logger",
".",
"debug",
"(",
"'Fetch concept failed.'",
")",
"return",
"self",
".",
"decode",
"(",
"res",
")",
",",
"status_code"
] |
cd5c6e10c6c4e653669e11d735d5773766986bda
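And a sketch of fetching a knowledge-base entry with get_concept; 'Q90' (Paris) is an illustrative Wikidata id, and 'preferredTerm' is an assumed response field.

from nerd.nerd_client import NerdClient

client = NerdClient()
concept, status = client.get_concept('Q90', lang='en')
if status == 200:
    print(concept.get('preferredTerm'))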
|
test
|
MDREnsemble.fit
|
Constructs the MDR ensemble from the provided training data
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
classes: array-like {n_samples}
List of class labels for prediction
Returns
-------
None
|
mdr/mdr_ensemble.py
|
def fit(self, features, classes):
"""Constructs the MDR ensemble from the provided training data
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
classes: array-like {n_samples}
List of class labels for prediction
Returns
-------
None
"""
self.ensemble.fit(features, classes)
# Construct the feature map from the ensemble predictions
unique_rows = list(set([tuple(row) for row in features]))
for row in unique_rows:
self.feature_map[row] = self.ensemble.predict([row])[0]
|
def fit(self, features, classes):
"""Constructs the MDR ensemble from the provided training data
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
classes: array-like {n_samples}
List of class labels for prediction
Returns
-------
None
"""
self.ensemble.fit(features, classes)
# Construct the feature map from the ensemble predictions
unique_rows = list(set([tuple(row) for row in features]))
for row in unique_rows:
self.feature_map[row] = self.ensemble.predict([row])[0]
|
[
"Constructs",
"the",
"MDR",
"ensemble",
"from",
"the",
"provided",
"training",
"data"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr_ensemble.py#L71-L91
|
[
"def",
"fit",
"(",
"self",
",",
"features",
",",
"classes",
")",
":",
"self",
".",
"ensemble",
".",
"fit",
"(",
"features",
",",
"classes",
")",
"# Construct the feature map from the ensemble predictions",
"unique_rows",
"=",
"list",
"(",
"set",
"(",
"[",
"tuple",
"(",
"row",
")",
"for",
"row",
"in",
"features",
"]",
")",
")",
"for",
"row",
"in",
"unique_rows",
":",
"self",
".",
"feature_map",
"[",
"row",
"]",
"=",
"self",
".",
"ensemble",
".",
"predict",
"(",
"[",
"row",
"]",
")",
"[",
"0",
"]"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
test
|
MDREnsemble.score
|
Estimates the accuracy of the predictions from the MDR ensemble
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
classes: array-like {n_samples}
List of true class labels
Returns
-------
accuracy_score: float
The estimated accuracy based on the constructed feature
|
mdr/mdr_ensemble.py
|
def score(self, features, classes, scoring_function=None, **scoring_function_kwargs):
"""Estimates the accuracy of the predictions from the MDR ensemble
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
classes: array-like {n_samples}
List of true class labels
Returns
-------
accuracy_score: float
The estimated accuracy based on the constructed feature
"""
new_feature = self.ensemble.predict(features)
if scoring_function is None:
return accuracy_score(classes, new_feature)
else:
return scoring_function(classes, new_feature, **scoring_function_kwargs)
|
def score(self, features, classes, scoring_function=None, **scoring_function_kwargs):
"""Estimates the accuracy of the predictions from the MDR ensemble
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
classes: array-like {n_samples}
List of true class labels
Returns
-------
accuracy_score: float
The estimated accuracy based on the constructed feature
"""
new_feature = self.ensemble.predict(features)
if scoring_function is None:
return accuracy_score(classes, new_feature)
else:
return scoring_function(classes, new_feature, **scoring_function_kwargs)
|
[
"Estimates",
"the",
"accuracy",
"of",
"the",
"predictions",
"from",
"the",
"MDR",
"ensemble"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr_ensemble.py#L128-L149
|
[
"def",
"score",
"(",
"self",
",",
"features",
",",
"classes",
",",
"scoring_function",
"=",
"None",
",",
"*",
"*",
"scoring_function_kwargs",
")",
":",
"new_feature",
"=",
"self",
".",
"ensemble",
".",
"predict",
"(",
"features",
")",
"if",
"scoring_function",
"is",
"None",
":",
"return",
"accuracy_score",
"(",
"classes",
",",
"new_feature",
")",
"else",
":",
"return",
"scoring_function",
"(",
"classes",
",",
"new_feature",
",",
"*",
"*",
"scoring_function_kwargs",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
test
|
MDRBase.fit
|
Constructs the MDR feature map from the provided training data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
-------
self: A copy of the fitted model
|
mdr/mdr.py
|
def fit(self, features, class_labels):
"""Constructs the MDR feature map from the provided training data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
-------
self: A copy of the fitted model
"""
unique_labels = sorted(np.unique(class_labels))
if len(unique_labels) != 2:
raise ValueError('MDR only supports binary endpoints.')
# Count the distribution of classes that fall into each MDR grid cell
self.class_count_matrix = defaultdict(lambda: defaultdict(int))
for row_i in range(features.shape[0]):
feature_instance = tuple(features[row_i])
self.class_count_matrix[feature_instance][class_labels[row_i]] += 1
self.class_count_matrix = dict(self.class_count_matrix)
# Only applies to binary classification
overall_class_fraction = float(sum(class_labels == unique_labels[0])) / class_labels.size
# If one class is more abundant in a MDR grid cell than it is overall, then assign the cell to that class
self.feature_map = {}
for feature_instance in self.class_count_matrix:
counts = self.class_count_matrix[feature_instance]
fraction = float(counts[unique_labels[0]]) / np.sum(list(counts.values()))
if fraction > overall_class_fraction:
self.feature_map[feature_instance] = unique_labels[0]
elif fraction == overall_class_fraction:
self.feature_map[feature_instance] = self.tie_break
else:
self.feature_map[feature_instance] = unique_labels[1]
return self
|
def fit(self, features, class_labels):
"""Constructs the MDR feature map from the provided training data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
-------
self: A copy of the fitted model
"""
unique_labels = sorted(np.unique(class_labels))
if len(unique_labels) != 2:
raise ValueError('MDR only supports binary endpoints.')
# Count the distribution of classes that fall into each MDR grid cell
self.class_count_matrix = defaultdict(lambda: defaultdict(int))
for row_i in range(features.shape[0]):
feature_instance = tuple(features[row_i])
self.class_count_matrix[feature_instance][class_labels[row_i]] += 1
self.class_count_matrix = dict(self.class_count_matrix)
# Only applies to binary classification
overall_class_fraction = float(sum(class_labels == unique_labels[0])) / class_labels.size
# If one class is more abundant in a MDR grid cell than it is overall, then assign the cell to that class
self.feature_map = {}
for feature_instance in self.class_count_matrix:
counts = self.class_count_matrix[feature_instance]
fraction = float(counts[unique_labels[0]]) / np.sum(list(counts.values()))
if fraction > overall_class_fraction:
self.feature_map[feature_instance] = unique_labels[0]
elif fraction == overall_class_fraction:
self.feature_map[feature_instance] = self.tie_break
else:
self.feature_map[feature_instance] = unique_labels[1]
return self
|
[
"Constructs",
"the",
"MDR",
"feature",
"map",
"from",
"the",
"provided",
"training",
"data",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr.py#L59-L100
|
[
"def",
"fit",
"(",
"self",
",",
"features",
",",
"class_labels",
")",
":",
"unique_labels",
"=",
"sorted",
"(",
"np",
".",
"unique",
"(",
"class_labels",
")",
")",
"if",
"len",
"(",
"unique_labels",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'MDR only supports binary endpoints.'",
")",
"# Count the distribution of classes that fall into each MDR grid cell",
"self",
".",
"class_count_matrix",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"int",
")",
")",
"for",
"row_i",
"in",
"range",
"(",
"features",
".",
"shape",
"[",
"0",
"]",
")",
":",
"feature_instance",
"=",
"tuple",
"(",
"features",
"[",
"row_i",
"]",
")",
"self",
".",
"class_count_matrix",
"[",
"feature_instance",
"]",
"[",
"class_labels",
"[",
"row_i",
"]",
"]",
"+=",
"1",
"self",
".",
"class_count_matrix",
"=",
"dict",
"(",
"self",
".",
"class_count_matrix",
")",
"# Only applies to binary classification",
"overall_class_fraction",
"=",
"float",
"(",
"sum",
"(",
"class_labels",
"==",
"unique_labels",
"[",
"0",
"]",
")",
")",
"/",
"class_labels",
".",
"size",
"# If one class is more abundant in a MDR grid cell than it is overall, then assign the cell to that class",
"self",
".",
"feature_map",
"=",
"{",
"}",
"for",
"feature_instance",
"in",
"self",
".",
"class_count_matrix",
":",
"counts",
"=",
"self",
".",
"class_count_matrix",
"[",
"feature_instance",
"]",
"fraction",
"=",
"float",
"(",
"counts",
"[",
"unique_labels",
"[",
"0",
"]",
"]",
")",
"/",
"np",
".",
"sum",
"(",
"list",
"(",
"counts",
".",
"values",
"(",
")",
")",
")",
"if",
"fraction",
">",
"overall_class_fraction",
":",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"=",
"unique_labels",
"[",
"0",
"]",
"elif",
"fraction",
"==",
"overall_class_fraction",
":",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"=",
"self",
".",
"tie_break",
"else",
":",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"=",
"unique_labels",
"[",
"1",
"]",
"return",
"self"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
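A small numeric illustration of the cell-assignment rule in fit above; the data is made up, and the import path is an assumption about this package's layout.

import numpy as np
from mdr import MDR

# Two binary features, binary endpoint, six samples.
features = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [1, 0], [1, 0]])
classes = np.array([0, 0, 0, 1, 1, 1])

model = MDR().fit(features, classes)
# Overall fraction of class 0 is 3/6 = 0.5.
# Cell (0, 0): fraction 2/2 = 1.0 > 0.5 -> mapped to class 0.
# Cell (0, 1): fraction 1/2 = 0.5 == 0.5 -> mapped to tie_break.
# Cell (1, 0): fraction 0/2 = 0.0 < 0.5 -> mapped to class 1.
print(model.feature_map)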
|
test
|
MDR.fit_transform
|
Convenience function that fits the provided data then constructs a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
----------
array-like: {n_samples, 1}
Constructed features from the provided feature matrix
|
mdr/mdr.py
|
def fit_transform(self, features, class_labels):
"""Convenience function that fits the provided data then constructs a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
----------
array-like: {n_samples, 1}
Constructed features from the provided feature matrix
"""
self.fit(features, class_labels)
return self.transform(features)
|
def fit_transform(self, features, class_labels):
"""Convenience function that fits the provided data then constructs a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
----------
array-like: {n_samples, 1}
Constructed features from the provided feature matrix
"""
self.fit(features, class_labels)
return self.transform(features)
|
[
"Convenience",
"function",
"that",
"fits",
"the",
"provided",
"data",
"then",
"constructs",
"a",
"new",
"feature",
"from",
"the",
"provided",
"features",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr.py#L137-L154
|
[
"def",
"fit_transform",
"(",
"self",
",",
"features",
",",
"class_labels",
")",
":",
"self",
".",
"fit",
"(",
"features",
",",
"class_labels",
")",
"return",
"self",
".",
"transform",
"(",
"features",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
test
|
MDRClassifier.fit_predict
|
Convenience function that fits the provided data then constructs predictions from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
----------
array-like: {n_samples}
Constructed features from the provided feature matrix
|
mdr/mdr.py
|
def fit_predict(self, features, class_labels):
"""Convenience function that fits the provided data then constructs predictions from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
----------
array-like: {n_samples}
Constructed features from the provided feature matrix
"""
self.fit(features, class_labels)
return self.predict(features)
|
def fit_predict(self, features, class_labels):
"""Convenience function that fits the provided data then constructs predictions from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
class_labels: array-like {n_samples}
List of true class labels
Returns
----------
array-like: {n_samples}
Constructed features from the provided feature matrix
"""
self.fit(features, class_labels)
return self.predict(features)
|
[
"Convenience",
"function",
"that",
"fits",
"the",
"provided",
"data",
"then",
"constructs",
"predictions",
"from",
"the",
"provided",
"features",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr.py#L191-L208
|
[
"def",
"fit_predict",
"(",
"self",
",",
"features",
",",
"class_labels",
")",
":",
"self",
".",
"fit",
"(",
"features",
",",
"class_labels",
")",
"return",
"self",
".",
"predict",
"(",
"features",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
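fit_predict follows the same call pattern but returns a flat vector of predicted class labels instead of a constructed-feature column; a sketch on the same invented toy data (the MDRClassifier import is assumed analogous to MDR):

import numpy as np
from mdr import MDRClassifier  # import path assumed

features = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
class_labels = np.array([0, 1, 1, 0])

# fit() then predict() in one call; one predicted label per sample.
predictions = MDRClassifier().fit_predict(features, class_labels)
print(predictions)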
|
test
|
MDRClassifier.score
|
Estimates the accuracy of the predictions from the constructed feature.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
class_labels: array-like {n_samples}
List of true class labels
Returns
-------
accuracy_score: float
The estimated accuracy based on the constructed feature
|
mdr/mdr.py
|
def score(self, features, class_labels, scoring_function=None, **scoring_function_kwargs):
"""Estimates the accuracy of the predictions from the constructed feature.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
class_labels: array-like {n_samples}
List of true class labels
Returns
-------
accuracy_score: float
The estimated accuracy based on the constructed feature
"""
if self.feature_map is None:
raise ValueError('The MDR model must be fit before score can be called.')
new_feature = self.predict(features)
if scoring_function is None:
return accuracy_score(class_labels, new_feature)
else:
return scoring_function(class_labels, new_feature, **scoring_function_kwargs)
|
def score(self, features, class_labels, scoring_function=None, **scoring_function_kwargs):
"""Estimates the accuracy of the predictions from the constructed feature.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
class_labels: array-like {n_samples}
List of true class labels
Returns
-------
accuracy_score: float
The estimated accuracy based on the constructed feature
"""
if self.feature_map is None:
raise ValueError('The MDR model must be fit before score can be called.')
new_feature = self.predict(features)
if scoring_function is None:
return accuracy_score(class_labels, new_feature)
else:
return scoring_function(class_labels, new_feature, **scoring_function_kwargs)
|
[
"Estimates",
"the",
"accuracy",
"of",
"the",
"predictions",
"from",
"the",
"constructed",
"feature",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/mdr.py#L210-L234
|
[
"def",
"score",
"(",
"self",
",",
"features",
",",
"class_labels",
",",
"scoring_function",
"=",
"None",
",",
"*",
"*",
"scoring_function_kwargs",
")",
":",
"if",
"self",
".",
"feature_map",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'The MDR model must be fit before score can be called.'",
")",
"new_feature",
"=",
"self",
".",
"predict",
"(",
"features",
")",
"if",
"scoring_function",
"is",
"None",
":",
"return",
"accuracy_score",
"(",
"class_labels",
",",
"new_feature",
")",
"else",
":",
"return",
"scoring_function",
"(",
"class_labels",
",",
"new_feature",
",",
"*",
"*",
"scoring_function_kwargs",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
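Because score() forwards to any metric(y_true, y_pred, **kwargs) callable when scoring_function is given, sklearn metrics drop in directly; a sketch with balanced_accuracy_score as one plausible substitute for the default accuracy_score:

import numpy as np
from sklearn.metrics import balanced_accuracy_score
from mdr import MDRClassifier  # import path assumed

features = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
class_labels = np.array([0, 1, 1, 0])

model = MDRClassifier().fit(features, class_labels)
print(model.score(features, class_labels))  # default: accuracy_score
print(model.score(features, class_labels,
                  scoring_function=balanced_accuracy_score))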
|
test
|
ContinuousMDR.fit
|
Constructs the Continuous MDR feature map from the provided training data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
targets: array-like {n_samples}
List of target values for prediction
Returns
-------
self: A copy of the fitted model
|
mdr/continuous_mdr.py
|
def fit(self, features, targets):
"""Constructs the Continuous MDR feature map from the provided training data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
targets: array-like {n_samples}
List of target values for prediction
Returns
-------
self: A copy of the fitted model
"""
self.feature_map = defaultdict(lambda: self.default_label)
self.overall_mean_trait_value = np.mean(targets)
self.mdr_matrix_values = defaultdict(list)
for row_i in range(features.shape[0]):
feature_instance = tuple(features[row_i])
self.mdr_matrix_values[feature_instance].append(targets[row_i])
for feature_instance in self.mdr_matrix_values:
grid_mean_trait_value = np.mean(self.mdr_matrix_values[feature_instance])
if grid_mean_trait_value > self.overall_mean_trait_value:
self.feature_map[feature_instance] = 1
elif grid_mean_trait_value == self.overall_mean_trait_value:
self.feature_map[feature_instance] = self.tie_break
else:
self.feature_map[feature_instance] = 0
# Convert defaultdict to dict so CMDR objects can be easily pickled
self.feature_map = dict(self.feature_map)
self.mdr_matrix_values = dict(self.mdr_matrix_values)
return self
|
def fit(self, features, targets):
"""Constructs the Continuous MDR feature map from the provided training data.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
targets: array-like {n_samples}
List of target values for prediction
Returns
-------
self: A copy of the fitted model
"""
self.feature_map = defaultdict(lambda: self.default_label)
self.overall_mean_trait_value = np.mean(targets)
self.mdr_matrix_values = defaultdict(list)
for row_i in range(features.shape[0]):
feature_instance = tuple(features[row_i])
self.mdr_matrix_values[feature_instance].append(targets[row_i])
for feature_instance in self.mdr_matrix_values:
grid_mean_trait_value = np.mean(self.mdr_matrix_values[feature_instance])
if grid_mean_trait_value > self.overall_mean_trait_value:
self.feature_map[feature_instance] = 1
elif grid_mean_trait_value == self.overall_mean_trait_value:
self.feature_map[feature_instance] = self.tie_break
else:
self.feature_map[feature_instance] = 0
# Convert defaultdict to dict so CMDR objects can be easily pickled
self.feature_map = dict(self.feature_map)
self.mdr_matrix_values = dict(self.mdr_matrix_values)
return self
|
[
"Constructs",
"the",
"Continuous",
"MDR",
"feature",
"map",
"from",
"the",
"provided",
"training",
"data",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/continuous_mdr.py#L57-L93
|
[
"def",
"fit",
"(",
"self",
",",
"features",
",",
"targets",
")",
":",
"self",
".",
"feature_map",
"=",
"defaultdict",
"(",
"lambda",
":",
"self",
".",
"default_label",
")",
"self",
".",
"overall_mean_trait_value",
"=",
"np",
".",
"mean",
"(",
"targets",
")",
"self",
".",
"mdr_matrix_values",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"row_i",
"in",
"range",
"(",
"features",
".",
"shape",
"[",
"0",
"]",
")",
":",
"feature_instance",
"=",
"tuple",
"(",
"features",
"[",
"row_i",
"]",
")",
"self",
".",
"mdr_matrix_values",
"[",
"feature_instance",
"]",
".",
"append",
"(",
"targets",
"[",
"row_i",
"]",
")",
"for",
"feature_instance",
"in",
"self",
".",
"mdr_matrix_values",
":",
"grid_mean_trait_value",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"mdr_matrix_values",
"[",
"feature_instance",
"]",
")",
"if",
"grid_mean_trait_value",
">",
"self",
".",
"overall_mean_trait_value",
":",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"=",
"1",
"elif",
"grid_mean_trait_value",
"==",
"self",
".",
"overall_mean_trait_value",
":",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"=",
"self",
".",
"tie_break",
"else",
":",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"=",
"0",
"# Convert defaultdict to dict so CMDR objects can be easily pickled",
"self",
".",
"feature_map",
"=",
"dict",
"(",
"self",
".",
"feature_map",
")",
"self",
".",
"mdr_matrix_values",
"=",
"dict",
"(",
"self",
".",
"mdr_matrix_values",
")",
"return",
"self"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
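A worked sketch of the above/below-mean binning that ContinuousMDR.fit performs, on invented trait values whose overall mean is 2.25:

import numpy as np
from mdr import ContinuousMDR  # import path assumed

features = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([1.0, 4.0, 3.5, 0.5])  # invented; overall mean is 2.25

cmdr = ContinuousMDR().fit(features, targets)
# Cells with mean trait above 2.25 map to 1, below to 0:
# (0,0)->0, (0,1)->1, (1,0)->1, (1,1)->0
print(cmdr.feature_map)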
|
test
|
ContinuousMDR.transform
|
Uses the Continuous MDR feature map to construct a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to transform
Returns
----------
array-like: {n_samples}
Constructed feature from the provided feature matrix
The constructed feature will be a binary variable, taking the values 0 and 1
|
mdr/continuous_mdr.py
|
def transform(self, features):
"""Uses the Continuous MDR feature map to construct a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to transform
Returns
----------
array-like: {n_samples}
Constructed feature from the provided feature matrix
The constructed feature will be a binary variable, taking the values 0 and 1
"""
new_feature = np.zeros(features.shape[0], dtype=int)
for row_i in range(features.shape[0]):
feature_instance = tuple(features[row_i])
if feature_instance in self.feature_map:
new_feature[row_i] = self.feature_map[feature_instance]
else:
new_feature[row_i] = self.default_label
return new_feature.reshape(features.shape[0], 1)
|
def transform(self, features):
"""Uses the Continuous MDR feature map to construct a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to transform
Returns
----------
array-like: {n_samples}
Constructed feature from the provided feature matrix
The constructed feature will be a binary variable, taking the values 0 and 1
"""
new_feature = np.zeros(features.shape[0], dtype=int)
for row_i in range(features.shape[0]):
feature_instance = tuple(features[row_i])
if feature_instance in self.feature_map:
new_feature[row_i] = self.feature_map[feature_instance]
else:
new_feature[row_i] = self.default_label
return new_feature.reshape(features.shape[0], 1)
|
[
"Uses",
"the",
"Continuous",
"MDR",
"feature",
"map",
"to",
"construct",
"a",
"new",
"feature",
"from",
"the",
"provided",
"features",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/continuous_mdr.py#L95-L119
|
[
"def",
"transform",
"(",
"self",
",",
"features",
")",
":",
"new_feature",
"=",
"np",
".",
"zeros",
"(",
"features",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"row_i",
"in",
"range",
"(",
"features",
".",
"shape",
"[",
"0",
"]",
")",
":",
"feature_instance",
"=",
"tuple",
"(",
"features",
"[",
"row_i",
"]",
")",
"if",
"feature_instance",
"in",
"self",
".",
"feature_map",
":",
"new_feature",
"[",
"row_i",
"]",
"=",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"else",
":",
"new_feature",
"[",
"row_i",
"]",
"=",
"self",
".",
"default_label",
"return",
"new_feature",
".",
"reshape",
"(",
"features",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
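transform() falls back to default_label for any genotype combination not seen during fit; a short sketch with a deliberately tiny fit set (the unseen (2, 2) row exercises the else branch, and the original dtype=np.int has been updated to plain int, since np.int was removed from NumPy):

import numpy as np
from mdr import ContinuousMDR  # import path assumed

cmdr = ContinuousMDR().fit(np.array([[0, 0], [0, 1]]),
                           np.array([1.0, 3.0]))
# (0, 0) was seen during fit; (2, 2) was not, so it maps to default_label.
print(cmdr.transform(np.array([[0, 0], [2, 2]])).ravel())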
|
test
|
ContinuousMDR.fit_transform
|
Convenience function that fits the provided data then constructs a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
targets: array-like {n_samples}
List of true target values
Returns
----------
array-like: {n_samples}
Constructed features from the provided feature matrix
|
mdr/continuous_mdr.py
|
def fit_transform(self, features, targets):
"""Convenience function that fits the provided data then constructs a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
targets: array-like {n_samples}
List of true target values
Returns
----------
array-like: {n_samples}
Constructed features from the provided feature matrix
"""
self.fit(features, targets)
return self.transform(features)
|
def fit_transform(self, features, targets):
"""Convenience function that fits the provided data then constructs a new feature from the provided features.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
targets: array-like {n_samples}
List of true target values
Returns
----------
array-like: {n_samples}
Constructed features from the provided feature matrix
"""
self.fit(features, targets)
return self.transform(features)
|
[
"Convenience",
"function",
"that",
"fits",
"the",
"provided",
"data",
"then",
"constructs",
"a",
"new",
"feature",
"from",
"the",
"provided",
"features",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/continuous_mdr.py#L121-L138
|
[
"def",
"fit_transform",
"(",
"self",
",",
"features",
",",
"targets",
")",
":",
"self",
".",
"fit",
"(",
"features",
",",
"targets",
")",
"return",
"self",
".",
"transform",
"(",
"features",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
test
|
ContinuousMDR.score
|
Estimates the quality of the ContinuousMDR model using a t-statistic.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
targets: array-like {n_samples}
List of true target values
Returns
-------
quality_score: float
The estimated quality of the Continuous MDR model
|
mdr/continuous_mdr.py
|
def score(self, features, targets):
"""Estimates the quality of the ContinuousMDR model using a t-statistic.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
targets: array-like {n_samples}
List of true target values
Returns
-------
quality_score: float
The estimated quality of the Continuous MDR model
"""
if self.feature_map is None:
raise ValueError('The Continuous MDR model must be fit before score() can be called.')
group_0_trait_values = []
group_1_trait_values = []
for feature_instance in self.feature_map:
if self.feature_map[feature_instance] == 0:
group_0_trait_values.extend(self.mdr_matrix_values[feature_instance])
else:
group_1_trait_values.extend(self.mdr_matrix_values[feature_instance])
return abs(ttest_ind(group_0_trait_values, group_1_trait_values).statistic)
|
def score(self, features, targets):
"""Estimates the quality of the ContinuousMDR model using a t-statistic.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix to predict from
targets: array-like {n_samples}
List of true target values
Returns
-------
quality_score: float
The estimated quality of the Continuous MDR model
"""
if self.feature_map is None:
raise ValueError('The Continuous MDR model must be fit before score() can be called.')
group_0_trait_values = []
group_1_trait_values = []
for feature_instance in self.feature_map:
if self.feature_map[feature_instance] == 0:
group_0_trait_values.extend(self.mdr_matrix_values[feature_instance])
else:
group_1_trait_values.extend(self.mdr_matrix_values[feature_instance])
return abs(ttest_ind(group_0_trait_values, group_1_trait_values).statistic)
|
[
"Estimates",
"the",
"quality",
"of",
"the",
"ContinuousMDR",
"model",
"using",
"a",
"t",
"-",
"statistic",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/continuous_mdr.py#L140-L168
|
[
"def",
"score",
"(",
"self",
",",
"features",
",",
"targets",
")",
":",
"if",
"self",
".",
"feature_map",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'The Continuous MDR model must be fit before score() can be called.'",
")",
"group_0_trait_values",
"=",
"[",
"]",
"group_1_trait_values",
"=",
"[",
"]",
"for",
"feature_instance",
"in",
"self",
".",
"feature_map",
":",
"if",
"self",
".",
"feature_map",
"[",
"feature_instance",
"]",
"==",
"0",
":",
"group_0_trait_values",
".",
"extend",
"(",
"self",
".",
"mdr_matrix_values",
"[",
"feature_instance",
"]",
")",
"else",
":",
"group_1_trait_values",
".",
"extend",
"(",
"self",
".",
"mdr_matrix_values",
"[",
"feature_instance",
"]",
")",
"return",
"abs",
"(",
"ttest_ind",
"(",
"group_0_trait_values",
",",
"group_1_trait_values",
")",
".",
"statistic",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
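Note that score() only consults its features/targets arguments for the fitted-model check; the t-statistic is computed entirely from the trait values memorized in mdr_matrix_values during fit. A sketch recomputing the same statistic by hand:

import numpy as np
from scipy.stats import ttest_ind
from mdr import ContinuousMDR  # import path assumed

features = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([1.0, 4.0, 3.5, 0.5])

cmdr = ContinuousMDR().fit(features, targets)

# Pool the memorized trait values by the 0/1 cell labels, as score() does.
group0, group1 = [], []
for cell, values in cmdr.mdr_matrix_values.items():
    (group1 if cmdr.feature_map[cell] else group0).extend(values)
assert np.isclose(cmdr.score(features, targets),
                  abs(ttest_ind(group0, group1).statistic))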
|
test
|
entropy
|
Calculates the entropy, H(X), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the entropy
base: integer (default: 2)
The base in which to calculate entropy
Returns
----------
entropy: float
The entropy calculated according to the equation H(X) = -sum(p_x * log p_x) for all states of X
|
mdr/utils/utils.py
|
def entropy(X, base=2):
"""Calculates the entropy, H(X), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the entropy
base: integer (default: 2)
The base in which to calculate entropy
Returns
----------
entropy: float
The entropy calculated according to the equation H(X) = -sum(p_x * log p_x) for all states of X
"""
return scipy.stats.entropy(list(Counter(X).values()), base=base)
|
def entropy(X, base=2):
"""Calculates the entropy, H(X), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the entropy
base: integer (default: 2)
The base in which to calculate entropy
Returns
----------
entropy: float
The entropy calculated according to the equation H(X) = -sum(p_x * log p_x) for all states of X
"""
return scipy.stats.entropy(list(Counter(X).values()), base=base)
|
[
"Calculates",
"the",
"entropy",
"H",
"(",
"X",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L29-L45
|
[
"def",
"entropy",
"(",
"X",
",",
"base",
"=",
"2",
")",
":",
"return",
"scipy",
".",
"stats",
".",
"entropy",
"(",
"list",
"(",
"Counter",
"(",
"X",
")",
".",
"values",
"(",
")",
")",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
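Two quick sanity checks of entropy(): a fair two-state variable carries exactly one bit, and scipy.stats.entropy normalizes the raw Counter counts internally, so unnormalized count lists are fine (the mdr.utils import path is assumed from the file location above):

import numpy as np
from mdr.utils import entropy  # import path assumed from mdr/utils/utils.py

print(entropy([0, 1, 0, 1]))       # 1.0: two equally likely states = 1 bit
print(entropy([0, 0, 0, 0]))       # 0.0: a constant carries no entropy
print(entropy([0, 1], base=np.e))  # ~0.693: the same split, in nats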
|
test
|
joint_entropy
|
Calculates the joint entropy, H(X,Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the joint entropy
Y: array-like (# samples)
An array of values for which to compute the joint entropy
base: integer (default: 2)
The base in which to calculate joint entropy
Returns
----------
joint_entropy: float
The joint entropy calculated according to the equation H(X,Y) = -sum(p_xy * log p_xy) for all combined states of X and Y
|
mdr/utils/utils.py
|
def joint_entropy(X, Y, base=2):
"""Calculates the joint entropy, H(X,Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the joint entropy
Y: array-like (# samples)
An array of values for which to compute the joint entropy
base: integer (default: 2)
The base in which to calculate joint entropy
Returns
----------
joint_entropy: float
The joint entropy calculated according to the equation H(X,Y) = -sum(p_xy * log p_xy) for all combined states of X and Y
"""
X_Y = ['{}{}'.format(x, y) for x, y in zip(X, Y)]
return entropy(X_Y, base=base)
|
def joint_entropy(X, Y, base=2):
"""Calculates the joint entropy, H(X,Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the joint entropy
Y: array-like (# samples)
An array of values for which to compute the joint entropy
base: integer (default: 2)
The base in which to calculate joint entropy
Returns
----------
joint_entropy: float
The joint entropy calculated according to the equation H(X,Y) = -sum(p_xy * log p_xy) for all combined states of X and Y
"""
X_Y = ['{}{}'.format(x, y) for x, y in zip(X, Y)]
return entropy(X_Y, base=base)
|
[
"Calculates",
"the",
"joint",
"entropy",
"H",
"(",
"X",
"Y",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L47-L66
|
[
"def",
"joint_entropy",
"(",
"X",
",",
"Y",
",",
"base",
"=",
"2",
")",
":",
"X_Y",
"=",
"[",
"'{}{}'",
".",
"format",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"X",
",",
"Y",
")",
"]",
"return",
"entropy",
"(",
"X_Y",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
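joint_entropy() encodes each pair by plain string concatenation ('{}{}'), which is unambiguous for single-character genotype codes but can alias multi-digit values: (1, 12) and (11, 2) both become '112'. A sketch showing the normal case and the collision:

from mdr.utils import joint_entropy  # import path assumed

# Four distinct, equally likely (x, y) pairs -> 2 bits of joint entropy.
print(joint_entropy([0, 0, 1, 1], [0, 1, 0, 1]))  # 2.0

# Collision caveat: both pairs concatenate to '112', so they look identical.
print(joint_entropy([1, 11], [12, 2]))  # 0.0 instead of 1.0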
|
test
|
conditional_entropy
|
Calculates the conditional entropy, H(X|Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the conditional entropy
Y: array-like (# samples)
An array of values for which to compute the conditional entropy
base: integer (default: 2)
The base in which to calculate conditional entropy
Returns
----------
conditional_entropy: float
The conditional entropy calculated according to the equation H(X|Y) = H(X,Y) - H(Y)
|
mdr/utils/utils.py
|
def conditional_entropy(X, Y, base=2):
"""Calculates the conditional entropy, H(X|Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the conditional entropy
Y: array-like (# samples)
An array of values for which to compute the conditional entropy
base: integer (default: 2)
The base in which to calculate conditional entropy
Returns
----------
conditional_entropy: float
The conditional entropy calculated according to the equation H(X|Y) = H(X,Y) - H(Y)
"""
return joint_entropy(X, Y, base=base) - entropy(Y, base=base)
|
def conditional_entropy(X, Y, base=2):
"""Calculates the conditional entropy, H(X|Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the conditional entropy
Y: array-like (# samples)
An array of values for which to compute the conditional entropy
base: integer (default: 2)
The base in which to calculate conditional entropy
Returns
----------
conditional_entropy: float
The conditional entropy calculated according to the equation H(X|Y) = H(X,Y) - H(Y)
"""
return joint_entropy(X, Y, base=base) - entropy(Y, base=base)
|
[
"Calculates",
"the",
"conditional",
"entropy",
"H",
"(",
"X|Y",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L68-L86
|
[
"def",
"conditional_entropy",
"(",
"X",
",",
"Y",
",",
"base",
"=",
"2",
")",
":",
"return",
"joint_entropy",
"(",
"X",
",",
"Y",
",",
"base",
"=",
"base",
")",
"-",
"entropy",
"(",
"Y",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
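Two identity checks for conditional_entropy(): conditioning on an independent variable leaves H(X) unchanged, while conditioning on X itself removes all uncertainty:

from mdr.utils import conditional_entropy  # import path assumed

X = [0, 0, 1, 1]
print(conditional_entropy(X, [0, 1, 0, 1]))  # 1.0: Y independent of X
print(conditional_entropy(X, X))             # 0.0: nothing left to learn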
|
test
|
mutual_information
|
Calculates the mutual information between two variables, I(X;Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the mutual information
Y: array-like (# samples)
An array of values for which to compute the mutual information
base: integer (default: 2)
The base in which to calculate mutual information
Returns
----------
mutual_information: float
The mutual information calculated according to the equation I(X;Y) = H(Y) - H(Y|X)
|
mdr/utils/utils.py
|
def mutual_information(X, Y, base=2):
"""Calculates the mutual information between two variables, I(X;Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the mutual information
Y: array-like (# samples)
An array of values for which to compute the mutual information
base: integer (default: 2)
The base in which to calculate mutual information
Returns
----------
mutual_information: float
The mutual information calculated according to the equation I(X;Y) = H(Y) - H(Y|X)
"""
return entropy(Y, base=base) - conditional_entropy(Y, X, base=base)
|
def mutual_information(X, Y, base=2):
"""Calculates the mutual information between two variables, I(X;Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the mutual information
Y: array-like (# samples)
An array of values for which to compute the mutual information
base: integer (default: 2)
The base in which to calculate mutual information
Returns
----------
mutual_information: float
The mutual information calculated according to the equation I(X;Y) = H(Y) - H(Y|X)
"""
return entropy(Y, base=base) - conditional_entropy(Y, X, base=base)
|
[
"Calculates",
"the",
"mutual",
"information",
"between",
"two",
"variables",
"I",
"(",
"X",
";",
"Y",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L88-L106
|
[
"def",
"mutual_information",
"(",
"X",
",",
"Y",
",",
"base",
"=",
"2",
")",
":",
"return",
"entropy",
"(",
"Y",
",",
"base",
"=",
"base",
")",
"-",
"conditional_entropy",
"(",
"Y",
",",
"X",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
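mutual_information() closes the chain: a feature that determines the target recovers the target's full entropy, while an independent one scores zero. A minimal check:

from mdr.utils import mutual_information  # import path assumed

Y = [0, 0, 1, 1]
print(mutual_information([0, 0, 1, 1], Y))  # 1.0: X determines Y exactly
print(mutual_information([0, 1, 0, 1], Y))  # 0.0: X independent of Y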
|
test
|
two_way_information_gain
|
Calculates the two-way information gain between three variables, I(X;Y;Z), in the given base
IG(X;Y;Z) indicates the information gained about variable Z by the joint variable X_Y, after removing
the information that X and Y have about Z individually. Thus, two-way information gain measures the
synergistic predictive value of variables X and Y about variable Z.
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the 2-way information gain
Y: array-like (# samples)
An array of values for which to compute the 2-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 2-way information gain
base: integer (default: 2)
The base in which to calculate 2-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation IG(X;Y;Z) = I(X,Y;Z) - I(X;Z) - I(Y;Z)
|
mdr/utils/utils.py
|
def two_way_information_gain(X, Y, Z, base=2):
"""Calculates the two-way information gain between three variables, I(X;Y;Z), in the given base
IG(X;Y;Z) indicates the information gained about variable Z by the joint variable X_Y, after removing
the information that X and Y have about Z individually. Thus, two-way information gain measures the
synergistic predictive value of variables X and Y about variable Z.
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the 2-way information gain
Y: array-like (# samples)
An array of values for which to compute the 2-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 2-way information gain
base: integer (default: 2)
The base in which to calculate 2-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation IG(X;Y;Z) = I(X,Y;Z) - I(X;Z) - I(Y;Z)
"""
X_Y = ['{}{}'.format(x, y) for x, y in zip(X, Y)]
return (mutual_information(X_Y, Z, base=base) -
mutual_information(X, Z, base=base) -
mutual_information(Y, Z, base=base))
|
def two_way_information_gain(X, Y, Z, base=2):
"""Calculates the two-way information gain between three variables, I(X;Y;Z), in the given base
IG(X;Y;Z) indicates the information gained about variable Z by the joint variable X_Y, after removing
the information that X and Y have about Z individually. Thus, two-way information gain measures the
synergistic predictive value of variables X and Y about variable Z.
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the 2-way information gain
Y: array-like (# samples)
An array of values for which to compute the 2-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 2-way information gain
base: integer (default: 2)
The base in which to calculate 2-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation IG(X;Y;Z) = I(X,Y;Z) - I(X;Z) - I(Y;Z)
"""
X_Y = ['{}{}'.format(x, y) for x, y in zip(X, Y)]
return (mutual_information(X_Y, Z, base=base) -
mutual_information(X, Z, base=base) -
mutual_information(Y, Z, base=base))
|
[
"Calculates",
"the",
"two",
"-",
"way",
"information",
"gain",
"between",
"three",
"variables",
"I",
"(",
"X",
";",
"Y",
";",
"Z",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L108-L135
|
[
"def",
"two_way_information_gain",
"(",
"X",
",",
"Y",
",",
"Z",
",",
"base",
"=",
"2",
")",
":",
"X_Y",
"=",
"[",
"'{}{}'",
".",
"format",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"X",
",",
"Y",
")",
"]",
"return",
"(",
"mutual_information",
"(",
"X_Y",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"mutual_information",
"(",
"X",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"mutual_information",
"(",
"Y",
",",
"Z",
",",
"base",
"=",
"base",
")",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
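XOR is the canonical demonstration of the synergy this measures: neither input alone tells you anything about Z, yet together they determine it, so the entire bit of information lives at the two-way level:

from mdr.utils import mutual_information, two_way_information_gain  # paths assumed

X = [0, 0, 1, 1]
Y = [0, 1, 0, 1]
Z = [0, 1, 1, 0]  # Z = X XOR Y

print(mutual_information(X, Z))           # 0.0: X alone is uninformative
print(mutual_information(Y, Z))           # 0.0: Y alone is uninformative
print(two_way_information_gain(X, Y, Z))  # 1.0: pure two-way synergy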
|
test
|
three_way_information_gain
|
Calculates the three-way information gain between four variables, I(W;X;Y;Z), in the given base
IG(W;X;Y;Z) indicates the information gained about variable Z by the joint variable W_X_Y, after removing
the information that W, X, and Y have about Z individually and jointly in pairs. Thus, 3-way information gain
measures the synergistic predictive value of variables W, X, and Y about variable Z.
Parameters
----------
W: array-like (# samples)
An array of values for which to compute the 3-way information gain
X: array-like (# samples)
An array of values for which to compute the 3-way information gain
Y: array-like (# samples)
An array of values for which to compute the 3-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 3-way information gain
base: integer (default: 2)
The base in which to calculate 3-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation:
IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z) - I(W;Z) - I(X;Z) - I(Y;Z)
|
mdr/utils/utils.py
|
def three_way_information_gain(W, X, Y, Z, base=2):
"""Calculates the three-way information gain between three variables, I(W;X;Y;Z), in the given base
IG(W;X;Y;Z) indicates the information gained about variable Z by the joint variable W_X_Y, after removing
the information that W, X, and Y have about Z individually and jointly in pairs. Thus, 3-way information gain
measures the synergistic predictive value of variables W, X, and Y about variable Z.
Parameters
----------
W: array-like (# samples)
An array of values for which to compute the 3-way information gain
X: array-like (# samples)
An array of values for which to compute the 3-way information gain
Y: array-like (# samples)
An array of values for which to compute the 3-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 3-way information gain
base: integer (default: 2)
The base in which to calculate 3-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation:
IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z) - I(W;Z) - I(X;Z) - I(Y;Z)
"""
W_X_Y = ['{}{}{}'.format(w, x, y) for w, x, y in zip(W, X, Y)]
return (mutual_information(W_X_Y, Z, base=base) -
two_way_information_gain(W, X, Z, base=base) -
two_way_information_gain(W, Y, Z, base=base) -
two_way_information_gain(X, Y, Z, base=base) -
mutual_information(W, Z, base=base) -
mutual_information(X, Z, base=base) -
mutual_information(Y, Z, base=base))
|
def three_way_information_gain(W, X, Y, Z, base=2):
"""Calculates the three-way information gain between three variables, I(W;X;Y;Z), in the given base
IG(W;X;Y;Z) indicates the information gained about variable Z by the joint variable W_X_Y, after removing
the information that W, X, and Y have about Z individually and jointly in pairs. Thus, 3-way information gain
measures the synergistic predictive value of variables W, X, and Y about variable Z.
Parameters
----------
W: array-like (# samples)
An array of values for which to compute the 3-way information gain
X: array-like (# samples)
An array of values for which to compute the 3-way information gain
Y: array-like (# samples)
An array of values for which to compute the 3-way information gain
Z: array-like (# samples)
An array of outcome values for which to compute the 3-way information gain
base: integer (default: 2)
The base in which to calculate 3-way information
Returns
----------
mutual_information: float
The information gain calculated according to the equation:
IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z) - I(W;Z) - I(X;Z) - I(Y;Z)
"""
W_X_Y = ['{}{}{}'.format(w, x, y) for w, x, y in zip(W, X, Y)]
return (mutual_information(W_X_Y, Z, base=base) -
two_way_information_gain(W, X, Z, base=base) -
two_way_information_gain(W, Y, Z, base=base) -
two_way_information_gain(X, Y, Z, base=base) -
mutual_information(W, Z, base=base) -
mutual_information(X, Z, base=base) -
mutual_information(Y, Z, base=base))
|
[
"Calculates",
"the",
"three",
"-",
"way",
"information",
"gain",
"between",
"three",
"variables",
"I",
"(",
"W",
";",
"X",
";",
"Y",
";",
"Z",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L137-L171
|
[
"def",
"three_way_information_gain",
"(",
"W",
",",
"X",
",",
"Y",
",",
"Z",
",",
"base",
"=",
"2",
")",
":",
"W_X_Y",
"=",
"[",
"'{}{}{}'",
".",
"format",
"(",
"w",
",",
"x",
",",
"y",
")",
"for",
"w",
",",
"x",
",",
"y",
"in",
"zip",
"(",
"W",
",",
"X",
",",
"Y",
")",
"]",
"return",
"(",
"mutual_information",
"(",
"W_X_Y",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"two_way_information_gain",
"(",
"W",
",",
"X",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"two_way_information_gain",
"(",
"W",
",",
"Y",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"two_way_information_gain",
"(",
"X",
",",
"Y",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"mutual_information",
"(",
"W",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"mutual_information",
"(",
"X",
",",
"Z",
",",
"base",
"=",
"base",
")",
"-",
"mutual_information",
"(",
"Y",
",",
"Z",
",",
"base",
"=",
"base",
")",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
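The three-way analogue of the XOR demonstration is parity over three inputs: every singleton and pairwise term vanishes, and the full bit appears only once all three features are combined:

import itertools
from mdr.utils import three_way_information_gain  # import path assumed

combos = list(itertools.product([0, 1], repeat=3))
W = [w for w, x, y in combos]
X = [x for w, x, y in combos]
Y = [y for w, x, y in combos]
Z = [(w + x + y) % 2 for w, x, y in combos]  # three-way parity

print(three_way_information_gain(W, X, Y, Z))  # 1.0: purely three-way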
|
test
|
_mdr_predict
|
Fits an MDR model to variables X and Y with the given labels, then returns the resulting predictions
This is a convenience method that should only be used internally.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
Returns
----------
predictions: array-like (# samples)
The predictions from the fitted MDR model
|
mdr/utils/utils.py
|
def _mdr_predict(X, Y, labels):
"""Fits a MDR model to variables X and Y with the given labels, then returns the resulting predictions
This is a convenience method that should only be used internally.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
Returns
----------
predictions: array-like (# samples)
The predictions from the fitted MDR model
"""
return MDR().fit_predict(np.column_stack((X, Y)), labels)
|
def _mdr_predict(X, Y, labels):
"""Fits a MDR model to variables X and Y with the given labels, then returns the resulting predictions
This is a convenience method that should only be used internally.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
Returns
----------
predictions: array-like (# samples)
The predictions from the fitted MDR model
"""
return MDR().fit_predict(np.column_stack((X, Y)), labels)
|
[
"Fits",
"a",
"MDR",
"model",
"to",
"variables",
"X",
"and",
"Y",
"with",
"the",
"given",
"labels",
"then",
"returns",
"the",
"resulting",
"predictions"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L173-L193
|
[
"def",
"_mdr_predict",
"(",
"X",
",",
"Y",
",",
"labels",
")",
":",
"return",
"MDR",
"(",
")",
".",
"fit_predict",
"(",
"np",
".",
"column_stack",
"(",
"(",
"X",
",",
"Y",
")",
")",
",",
"labels",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
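Since _mdr_predict is just MDR().fit_predict over the column-stacked pair, the mdr_* helpers below all reduce two features to a single MDR-constructed variable before applying the corresponding entropy measure. A sketch of a direct call (the fully qualified import is assumed because the leading underscore marks the helper as internal):

import numpy as np
from mdr.utils.utils import _mdr_predict  # internal helper; path assumed

X = np.array([0, 0, 1, 1])
Y = np.array([0, 1, 0, 1])
labels = np.array([0, 1, 1, 0])  # labels = X XOR Y

# One fitted-and-predicted MDR pass over the stacked (X, Y) pair.
print(_mdr_predict(X, Y, labels))  # one predicted label per sample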
|
test
|
mdr_entropy
|
Calculates the MDR entropy, H(XY), in the given base
MDR entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR entropy
Returns
----------
mdr_entropy: float
The MDR entropy calculated according to the equation H(XY) = -sum(p_xy * log p_xy) for all output states of the MDR model
|
mdr/utils/utils.py
|
def mdr_entropy(X, Y, labels, base=2):
"""Calculates the MDR entropy, H(XY), in the given base
MDR entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR entropy
Returns
----------
mdr_entropy: float
The MDR entropy calculated according to the equation H(XY) = -sum(p_xy * log p_xy) for all output states of the MDR model
"""
return entropy(_mdr_predict(X, Y, labels), base=base)
|
def mdr_entropy(X, Y, labels, base=2):
"""Calculates the MDR entropy, H(XY), in the given base
MDR entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR entropy
Returns
----------
mdr_entropy: float
The MDR entropy calculated according to the equation H(XY) = -sum(p_xy * log p_xy) for all output states of the MDR model
"""
return entropy(_mdr_predict(X, Y, labels), base=base)
|
[
"Calculates",
"the",
"MDR",
"entropy",
"H",
"(",
"XY",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L195-L218
|
[
"def",
"mdr_entropy",
"(",
"X",
",",
"Y",
",",
"labels",
",",
"base",
"=",
"2",
")",
":",
"return",
"entropy",
"(",
"_mdr_predict",
"(",
"X",
",",
"Y",
",",
"labels",
")",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
test
|
mdr_conditional_entropy
|
Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
|
mdr/utils/utils.py
|
def mdr_conditional_entropy(X, Y, labels, base=2):
"""Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
"""
return conditional_entropy(_mdr_predict(X, Y, labels), labels, base=base)
|
def mdr_conditional_entropy(X, Y, labels, base=2):
"""Calculates the MDR conditional entropy, H(XY|labels), in the given base
MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating
the entropy of the resulting model's predictions conditional on the provided labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR conditional entropy
Returns
----------
mdr_conditional_entropy: float
The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
"""
return conditional_entropy(_mdr_predict(X, Y, labels), labels, base=base)
|
[
"Calculates",
"the",
"MDR",
"conditional",
"entropy",
"H",
"(",
"XY|labels",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L220-L243
|
[
"def",
"mdr_conditional_entropy",
"(",
"X",
",",
"Y",
",",
"labels",
",",
"base",
"=",
"2",
")",
":",
"return",
"conditional_entropy",
"(",
"_mdr_predict",
"(",
"X",
",",
"Y",
",",
"labels",
")",
",",
"labels",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
test
|
mdr_mutual_information
|
Calculates the MDR mutual information, I(XY;labels), in the given base
MDR mutual information is calculated by combining variables X and Y into a single MDR model then calculating
the mutual information between the resulting model's predictions and the labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR mutual information
Returns
----------
mdr_mutual_information: float
The MDR mutual information calculated according to the equation I(XY;labels) = H(labels) - H(labels|XY)
|
mdr/utils/utils.py
|
def mdr_mutual_information(X, Y, labels, base=2):
"""Calculates the MDR mutual information, I(XY;labels), in the given base
MDR mutual information is calculated by combining variables X and Y into a single MDR model then calculating
the mutual information between the resulting model's predictions and the labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR mutual information
Returns
----------
mdr_mutual_information: float
The MDR mutual information calculated according to the equation I(XY;labels) = H(labels) - H(labels|XY)
"""
return mutual_information(_mdr_predict(X, Y, labels), labels, base=base)
|
def mdr_mutual_information(X, Y, labels, base=2):
"""Calculates the MDR mutual information, I(XY;labels), in the given base
MDR mutual information is calculated by combining variables X and Y into a single MDR model then calculating
the mutual information between the resulting model's predictions and the labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR mutual information
Returns
----------
mdr_mutual_information: float
The MDR mutual information calculated according to the equation I(XY;labels) = H(labels) - H(labels|XY)
"""
return mutual_information(_mdr_predict(X, Y, labels), labels, base=base)
|
[
"Calculates",
"the",
"MDR",
"mutual",
"information",
"I",
"(",
"XY",
";",
"labels",
")",
"in",
"the",
"given",
"base"
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L245-L268
|
[
"def",
"mdr_mutual_information",
"(",
"X",
",",
"Y",
",",
"labels",
",",
"base",
"=",
"2",
")",
":",
"return",
"mutual_information",
"(",
"_mdr_predict",
"(",
"X",
",",
"Y",
",",
"labels",
")",
",",
"labels",
",",
"base",
"=",
"base",
")"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
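Putting the wrappers to work on the XOR toy data: the raw feature is useless on its own, but the MDR-constructed pair variable separates the classes, so its mutual information with the labels recovers the full bit:

import numpy as np
from mdr.utils import mutual_information, mdr_mutual_information  # paths assumed

X = np.array([0, 0, 1, 1])
Y = np.array([0, 1, 0, 1])
labels = np.array([0, 1, 1, 0])  # labels = X XOR Y

print(mutual_information(X, labels))         # 0.0: single feature, no signal
print(mdr_mutual_information(X, Y, labels))  # 1.0: the MDR pair captures it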
|
test
|
n_way_models
|
Fits an MDR model to all n-way combinations of the features in X.
Note that this function performs an exhaustive search through all feature combinations and can be computationally expensive.
Parameters
----------
mdr_instance: object
An instance of the MDR type to use.
X: array-like (# rows, # features)
NumPy matrix containing the features
y: array-like (# rows, 1)
NumPy matrix containing the target values
n: list (default: [2])
The size(s) of the MDR models to generate.
e.g., if n == [3], all 3-way models will be generated.
feature_names: list (default: None)
The corresponding names of the features in X.
If None, then the features will be named according to their order.
Returns
----------
(fitted_model, fitted_model_score, fitted_model_features): tuple of (list, list, list)
fitted_model contains the MDR model fitted to the data.
fitted_model_score contains the training scores corresponding to the fitted MDR model.
fitted_model_features contains a list of the names of the features that were used in the corresponding model.
|
mdr/utils/utils.py
|
def n_way_models(mdr_instance, X, y, n=[2], feature_names=None):
"""Fits a MDR model to all n-way combinations of the features in X.
Note that this function performs an exhaustive search through all feature combinations and can be computationally expensive.
Parameters
----------
mdr_instance: object
An instance of the MDR type to use.
X: array-like (# rows, # features)
NumPy matrix containing the features
y: array-like (# rows, 1)
NumPy matrix containing the target values
n: list (default: [2])
The size(s) of the MDR models to generate.
e.g., if n == [3], all 3-way models will be generated.
feature_names: list (default: None)
The corresponding names of the features in X.
If None, then the features will be named according to their order.
Returns
----------
(fitted_model, fitted_model_score, fitted_model_features): tuple of (list, list, list)
fitted_model contains the MDR model fitted to the data.
fitted_model_score contains the training scores corresponding to the fitted MDR model.
fitted_model_features contains a list of the names of the features that were used in the corresponding model.
"""
if feature_names is None:
feature_names = list(range(X.shape[1]))
for cur_n in n:
for features in itertools.combinations(range(X.shape[1]), cur_n):
mdr_model = copy.deepcopy(mdr_instance)
mdr_model.fit(X[:, features], y)
mdr_model_score = mdr_model.score(X[:, features], y)
model_features = [feature_names[feature] for feature in features]
yield mdr_model, mdr_model_score, model_features
|
def n_way_models(mdr_instance, X, y, n=[2], feature_names=None):
"""Fits a MDR model to all n-way combinations of the features in X.
Note that this function performs an exhaustive search through all feature combinations and can be computationally expensive.
Parameters
----------
mdr_instance: object
An instance of the MDR type to use.
X: array-like (# rows, # features)
NumPy matrix containing the features
y: array-like (# rows, 1)
NumPy matrix containing the target values
n: list (default: [2])
The size(s) of the MDR models to generate.
e.g., if n == [3], all 3-way models will be generated.
feature_names: list (default: None)
The corresponding names of the features in X.
If None, then the features will be named according to their order.
Returns
----------
(fitted_model, fitted_model_score, fitted_model_features): tuple of (list, list, list)
fitted_model contains the MDR model fitted to the data.
fitted_model_score contains the training scores corresponding to the fitted MDR model.
fitted_model_features contains a list of the names of the features that were used in the corresponding model.
"""
if feature_names is None:
feature_names = list(range(X.shape[1]))
for cur_n in n:
for features in itertools.combinations(range(X.shape[1]), cur_n):
mdr_model = copy.deepcopy(mdr_instance)
mdr_model.fit(X[:, features], y)
mdr_model_score = mdr_model.score(X[:, features], y)
model_features = [feature_names[feature] for feature in features]
yield mdr_model, mdr_model_score, model_features
|
[
"Fits",
"a",
"MDR",
"model",
"to",
"all",
"n",
"-",
"way",
"combinations",
"of",
"the",
"features",
"in",
"X",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L270-L307
|
[
"def",
"n_way_models",
"(",
"mdr_instance",
",",
"X",
",",
"y",
",",
"n",
"=",
"[",
"2",
"]",
",",
"feature_names",
"=",
"None",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"list",
"(",
"range",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
")",
"for",
"cur_n",
"in",
"n",
":",
"for",
"features",
"in",
"itertools",
".",
"combinations",
"(",
"range",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
",",
"cur_n",
")",
":",
"mdr_model",
"=",
"copy",
".",
"deepcopy",
"(",
"mdr_instance",
")",
"mdr_model",
".",
"fit",
"(",
"X",
"[",
":",
",",
"features",
"]",
",",
"y",
")",
"mdr_model_score",
"=",
"mdr_model",
".",
"score",
"(",
"X",
"[",
":",
",",
"features",
"]",
",",
"y",
")",
"model_features",
"=",
"[",
"feature_names",
"[",
"feature",
"]",
"for",
"feature",
"in",
"features",
"]",
"yield",
"mdr_model",
",",
"mdr_model_score",
",",
"model_features"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
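n_way_models is a generator (it yields rather than returns), so an exhaustive scan streams one fitted model at a time; a sketch that keeps the best-scoring 2-way model over a small invented matrix (MDRClassifier is used since score() is defined on the classifier):

import numpy as np
from mdr import MDRClassifier
from mdr.utils import n_way_models  # import path assumed

X = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]])
y = np.array([0, 1, 1, 0])

results = n_way_models(MDRClassifier(), X, y, n=[2],
                       feature_names=['a', 'b', 'c'])
model, score, names = max(results, key=lambda triple: triple[1])
print(score, names)  # best training accuracy and its feature pair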
|
test
|
plot_mdr_grid
|
Visualizes the MDR grid of a given fitted MDR instance. Only works for 2-way MDR models.
This function is currently incomplete.
Parameters
----------
mdr_instance: object
A fitted instance of the MDR type to visualize.
Returns
----------
fig: matplotlib.figure
Figure object for the visualized MDR grid.
|
mdr/utils/utils.py
|
def plot_mdr_grid(mdr_instance):
"""Visualizes the MDR grid of a given fitted MDR instance. Only works for 2-way MDR models.
This function is currently incomplete.
Parameters
----------
mdr_instance: object
A fitted instance of the MDR type to visualize.
Returns
----------
fig: matplotlib.figure
Figure object for the visualized MDR grid.
"""
var1_levels = list(set([variables[0] for variables in mdr_instance.feature_map]))
var2_levels = list(set([variables[1] for variables in mdr_instance.feature_map]))
max_count = np.array(list(mdr_instance.class_count_matrix.values())).flatten().max()
"""
TODO:
- Add common axis labels
- Make sure this scales for smaller and larger record sizes
- Extend to 3-way+ models, e.g., http://4.bp.blogspot.com/-vgKCjEkWFUc/UPwPuHo6XvI/AAAAAAAAAE0/fORHqDcoikE/s1600/model.jpg
"""
fig, splots = plt.subplots(ncols=len(var1_levels), nrows=len(var2_levels), sharey=True, sharex=True)
fig.set_figwidth(6)
fig.set_figheight(6)
for (var1, var2) in itertools.product(var1_levels, var2_levels):
class_counts = mdr_instance.class_count_matrix[(var1, var2)]
splot = splots[var2_levels.index(var2)][var1_levels.index(var1)]
splot.set_yticks([])
splot.set_xticks([])
splot.set_ylim(0, max_count * 1.5)
splot.set_xlim(-0.5, 1.5)
if var2_levels.index(var2) == 0:
splot.set_title('X1 = {}'.format(var1), fontsize=12)
if var1_levels.index(var1) == 0:
splot.set_ylabel('X2 = {}'.format(var2), fontsize=12)
bars = splot.bar(x=range(class_counts.shape[0]),
height=class_counts, width=0.5,
color='black', align='center')
bgcolor = 'lightgrey' if mdr_instance.feature_map[(var1, var2)] == 0 else 'darkgrey'
splot.set_facecolor(bgcolor)
for index, bar in enumerate(bars):
splot.text(index, class_counts[index] + (max_count * 0.1), class_counts[index], ha='center')
fig.tight_layout()
return fig
|
def plot_mdr_grid(mdr_instance):
"""Visualizes the MDR grid of a given fitted MDR instance. Only works for 2-way MDR models.
This function is currently incomplete.
Parameters
----------
mdr_instance: object
A fitted instance of the MDR type to visualize.
Returns
----------
fig: matplotlib.figure
Figure object for the visualized MDR grid.
"""
var1_levels = list(set([variables[0] for variables in mdr_instance.feature_map]))
var2_levels = list(set([variables[1] for variables in mdr_instance.feature_map]))
max_count = np.array(list(mdr_instance.class_count_matrix.values())).flatten().max()
"""
TODO:
- Add common axis labels
- Make sure this scales for smaller and larger record sizes
- Extend to 3-way+ models, e.g., http://4.bp.blogspot.com/-vgKCjEkWFUc/UPwPuHo6XvI/AAAAAAAAAE0/fORHqDcoikE/s1600/model.jpg
"""
fig, splots = plt.subplots(ncols=len(var1_levels), nrows=len(var2_levels), sharey=True, sharex=True)
fig.set_figwidth(6)
fig.set_figheight(6)
for (var1, var2) in itertools.product(var1_levels, var2_levels):
class_counts = mdr_instance.class_count_matrix[(var1, var2)]
splot = splots[var2_levels.index(var2)][var1_levels.index(var1)]
splot.set_yticks([])
splot.set_xticks([])
splot.set_ylim(0, max_count * 1.5)
splot.set_xlim(-0.5, 1.5)
if var2_levels.index(var2) == 0:
splot.set_title('X1 = {}'.format(var1), fontsize=12)
if var1_levels.index(var1) == 0:
splot.set_ylabel('X2 = {}'.format(var2), fontsize=12)
bars = splot.bar(x=range(class_counts.shape[0]),
height=class_counts, width=0.5,
color='black', align='center')
bgcolor = 'lightgrey' if mdr_instance.feature_map[(var1, var2)] == 0 else 'darkgrey'
splot.set_facecolor(bgcolor)
for index, bar in enumerate(bars):
splot.text(index, class_counts[index] + (max_count * 0.1), class_counts[index], ha='center')
fig.tight_layout()
return fig
|
[
"Visualizes",
"the",
"MDR",
"grid",
"of",
"a",
"given",
"fitted",
"MDR",
"instance",
".",
"Only",
"works",
"for",
"2",
"-",
"way",
"MDR",
"models",
".",
"This",
"function",
"is",
"currently",
"incomplete",
"."
] |
EpistasisLab/scikit-mdr
|
python
|
https://github.com/EpistasisLab/scikit-mdr/blob/768565deb10467d04a960d27e000ab38b7aa8a62/mdr/utils/utils.py#L309-L363
|
[
"def",
"plot_mdr_grid",
"(",
"mdr_instance",
")",
":",
"var1_levels",
"=",
"list",
"(",
"set",
"(",
"[",
"variables",
"[",
"0",
"]",
"for",
"variables",
"in",
"mdr_instance",
".",
"feature_map",
"]",
")",
")",
"var2_levels",
"=",
"list",
"(",
"set",
"(",
"[",
"variables",
"[",
"1",
"]",
"for",
"variables",
"in",
"mdr_instance",
".",
"feature_map",
"]",
")",
")",
"max_count",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"mdr_instance",
".",
"class_count_matrix",
".",
"values",
"(",
")",
")",
")",
".",
"flatten",
"(",
")",
".",
"max",
"(",
")",
"\"\"\"\n TODO:\n - Add common axis labels\n - Make sure this scales for smaller and larger record sizes\n - Extend to 3-way+ models, e.g., http://4.bp.blogspot.com/-vgKCjEkWFUc/UPwPuHo6XvI/AAAAAAAAAE0/fORHqDcoikE/s1600/model.jpg\n \"\"\"",
"fig",
",",
"splots",
"=",
"plt",
".",
"subplots",
"(",
"ncols",
"=",
"len",
"(",
"var1_levels",
")",
",",
"nrows",
"=",
"len",
"(",
"var2_levels",
")",
",",
"sharey",
"=",
"True",
",",
"sharex",
"=",
"True",
")",
"fig",
".",
"set_figwidth",
"(",
"6",
")",
"fig",
".",
"set_figheight",
"(",
"6",
")",
"for",
"(",
"var1",
",",
"var2",
")",
"in",
"itertools",
".",
"product",
"(",
"var1_levels",
",",
"var2_levels",
")",
":",
"class_counts",
"=",
"mdr_instance",
".",
"class_count_matrix",
"[",
"(",
"var1",
",",
"var2",
")",
"]",
"splot",
"=",
"splots",
"[",
"var2_levels",
".",
"index",
"(",
"var2",
")",
"]",
"[",
"var1_levels",
".",
"index",
"(",
"var1",
")",
"]",
"splot",
".",
"set_yticks",
"(",
"[",
"]",
")",
"splot",
".",
"set_xticks",
"(",
"[",
"]",
")",
"splot",
".",
"set_ylim",
"(",
"0",
",",
"max_count",
"*",
"1.5",
")",
"splot",
".",
"set_xlim",
"(",
"-",
"0.5",
",",
"1.5",
")",
"if",
"var2_levels",
".",
"index",
"(",
"var2",
")",
"==",
"0",
":",
"splot",
".",
"set_title",
"(",
"'X1 = {}'",
".",
"format",
"(",
"var1",
")",
",",
"fontsize",
"=",
"12",
")",
"if",
"var1_levels",
".",
"index",
"(",
"var1",
")",
"==",
"0",
":",
"splot",
".",
"set_ylabel",
"(",
"'X2 = {}'",
".",
"format",
"(",
"var2",
")",
",",
"fontsize",
"=",
"12",
")",
"bars",
"=",
"splot",
".",
"bar",
"(",
"left",
"=",
"range",
"(",
"class_counts",
".",
"shape",
"[",
"0",
"]",
")",
",",
"height",
"=",
"class_counts",
",",
"width",
"=",
"0.5",
",",
"color",
"=",
"'black'",
",",
"align",
"=",
"'center'",
")",
"bgcolor",
"=",
"'lightgrey'",
"if",
"mdr_instance",
".",
"feature_map",
"[",
"(",
"var1",
",",
"var2",
")",
"]",
"==",
"0",
"else",
"'darkgrey'",
"splot",
".",
"set_axis_bgcolor",
"(",
"bgcolor",
")",
"for",
"index",
",",
"bar",
"in",
"enumerate",
"(",
"bars",
")",
":",
"splot",
".",
"text",
"(",
"index",
",",
"class_counts",
"[",
"index",
"]",
"+",
"(",
"max_count",
"*",
"0.1",
")",
",",
"class_counts",
"[",
"index",
"]",
",",
"ha",
"=",
"'center'",
")",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"fig"
] |
768565deb10467d04a960d27e000ab38b7aa8a62
|
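As a quick orientation, here is a minimal sketch of how the plot_mdr_grid record above might be exercised. It assumes scikit-mdr's MDR estimator and that plot_mdr_grid is importable from mdr.utils (the function is defined in mdr/utils/utils.py; the exact import path is an assumption), and the XOR-style dataset (X, y) is purely hypothetical:

import numpy as np
from mdr import MDR
from mdr.utils import plot_mdr_grid  # assumed import path for the function shown above

# Hypothetical two-feature binary dataset (XOR pattern), so the fitted model is 2-way
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]] * 25)
y = np.array([0, 1, 1, 0] * 25)

mdr = MDR()
mdr.fit(X, y)             # should populate feature_map and class_count_matrix used above
fig = plot_mdr_grid(mdr)  # returns the matplotlib figure built by the record above
fig.savefig('mdr_grid.png')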
test
|
makemigrations
|
等价于 django makemigrations 操作
|
fantasy/cli.py
|
def makemigrations(migrations_root):
"""等价于 django makemigrations 操作"""
from flask_migrate import (Migrate, init as migrate_init,
migrate as migrate_exec)
migrations_root = migrations_root or os.path.join(
os.environ.get('FANTASY_MIGRATION_PATH',
os.getcwd()),
'migrations')
migrations_root = os.path.expanduser(migrations_root)
mig = Migrate(app, app.db, directory=migrations_root)
if not os.path.exists(migrations_root):
migrate_init(migrations_root)
pass
models_file = os.path.join(migrations_root, 'models.txt')
if not os.path.exists(models_file):
with open(models_file, 'w') as fw:
fw.write('# add module name in this file.')
pass
pass
with open(models_file, 'r') as fp:
modules = fp.readlines()
pass
modules = filter(lambda x: x.strip("\n"), modules)
modules = map(lambda x: x.strip("\n").split("#")[0].strip(), modules)
modules = list(filter(lambda x: x, modules))
if not modules:
click.echo(
click.style('No models found,'
'skip create migrations...'
'You need edit models.txt file set your module',
fg='yellow'))
sys.exit(0)
for m in modules:
importlib.import_module(m + '.models')
pass
migrate_exec(migrations_root)
mig.init_app(app, app.db)
pass
|
def makemigrations(migrations_root):
"""等价于 django makemigrations 操作"""
from flask_migrate import (Migrate, init as migrate_init,
migrate as migrate_exec)
migrations_root = migrations_root or os.path.join(
os.environ.get('FANTASY_MIGRATION_PATH',
os.getcwd()),
'migrations')
migrations_root = os.path.expanduser(migrations_root)
mig = Migrate(app, app.db, directory=migrations_root)
if not os.path.exists(migrations_root):
migrate_init(migrations_root)
pass
models_file = os.path.join(migrations_root, 'models.txt')
if not os.path.exists(models_file):
with open(models_file, 'w') as fw:
fw.write('# add module name in this file.')
pass
pass
with open(models_file, 'r') as fp:
modules = fp.readlines()
pass
modules = filter(lambda x: x.strip("\n"), modules)
modules = map(lambda x: x.strip("\n").split("#")[0].strip(), modules)
modules = list(filter(lambda x: x, modules))
if not modules:
click.echo(
click.style('No models found,'
'skip create migrations...'
'You need edit models.txt file set your module',
fg='yellow'))
sys.exit(0)
for m in modules:
importlib.import_module(m + '.models')
pass
migrate_exec(migrations_root)
mig.init_app(app, app.db)
pass
|
[
"等价于",
"django",
"makemigrations",
"操作"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/cli.py#L29-L77
|
[
"def",
"makemigrations",
"(",
"migrations_root",
")",
":",
"from",
"flask_migrate",
"import",
"(",
"Migrate",
",",
"init",
"as",
"migrate_init",
",",
"migrate",
"as",
"migrate_exec",
")",
"migrations_root",
"=",
"migrations_root",
"or",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'FANTASY_MIGRATION_PATH'",
",",
"os",
".",
"getcwd",
"(",
")",
")",
",",
"'migrations'",
")",
"migrations_root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"migrations_root",
")",
"mig",
"=",
"Migrate",
"(",
"app",
",",
"app",
".",
"db",
",",
"directory",
"=",
"migrations_root",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"migrations_root",
")",
":",
"migrate_init",
"(",
"migrations_root",
")",
"pass",
"models_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"migrations_root",
",",
"'models.txt'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"models_file",
")",
":",
"with",
"open",
"(",
"models_file",
",",
"'w'",
")",
"as",
"fw",
":",
"fw",
".",
"write",
"(",
"'# add module name in this file.'",
")",
"pass",
"pass",
"with",
"open",
"(",
"models_file",
",",
"'r'",
")",
"as",
"fp",
":",
"modules",
"=",
"fp",
".",
"readlines",
"(",
")",
"pass",
"modules",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
"\"\\n\"",
")",
",",
"modules",
")",
"modules",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
"\"\\n\"",
")",
".",
"split",
"(",
"\"#\"",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"modules",
")",
"modules",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
",",
"modules",
")",
")",
"if",
"not",
"modules",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'No models found,'",
"'skip create migrations...'",
"'You need edit models.txt file set your module'",
",",
"fg",
"=",
"'yellow'",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"for",
"m",
"in",
"modules",
":",
"importlib",
".",
"import_module",
"(",
"m",
"+",
"'.models'",
")",
"pass",
"migrate_exec",
"(",
"migrations_root",
")",
"mig",
".",
"init_app",
"(",
"app",
",",
"app",
".",
"db",
")",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
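The makemigrations record above (its docstring reads, roughly, "equivalent to Django's makemigrations operation") discovers model modules from a migrations/models.txt file, dropping # comments and blank lines before importing '<module>.models' for each entry. A self-contained sketch of that parse, with hypothetical file contents:

# Hypothetical migrations/models.txt contents, mirroring the parsing logic above
lines = [
    "# add module name in this file.\n",
    "shop  # storefront models\n",
    "\n",
    "accounts\n",
]
modules = [l.strip("\n").split("#")[0].strip() for l in lines]
modules = [m for m in modules if m]
assert modules == ["shop", "accounts"]  # each entry is imported as '<mod>.models'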
test
|
migrate
|
等价于 django migrate 操作
|
fantasy/cli.py
|
def migrate(migrations_root):
"""等价于 django migrate 操作"""
from flask_migrate import Migrate, upgrade as migrate_upgrade
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import database_exists, create_database
db = SQLAlchemy()
dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if not database_exists(dsn):
create_database(dsn)
pass
migrations_root = migrations_root or os.path.join(
os.environ.get('FANTASY_MIGRATION_PATH',
os.getcwd()),
'migrations')
migrations_root = os.path.expanduser(migrations_root)
if os.path.exists(migrations_root):
mig = Migrate(app, db, directory=migrations_root)
mig.init_app(app, db)
migrate_upgrade(migrations_root)
else:
click.echo(
click.style('migration files not exist,skip migrate...', fg='red'))
sys.exit(-1)
pass
|
def migrate(migrations_root):
"""等价于 django migrate 操作"""
from flask_migrate import Migrate, upgrade as migrate_upgrade
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import database_exists, create_database
db = SQLAlchemy()
dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if not database_exists(dsn):
create_database(dsn)
pass
migrations_root = migrations_root or os.path.join(
os.environ.get('FANTASY_MIGRATION_PATH',
os.getcwd()),
'migrations')
migrations_root = os.path.expanduser(migrations_root)
if os.path.exists(migrations_root):
mig = Migrate(app, db, directory=migrations_root)
mig.init_app(app, db)
migrate_upgrade(migrations_root)
else:
click.echo(
click.style('migration files not exist,skip migrate...', fg='red'))
sys.exit(-1)
pass
|
[
"等价于",
"django",
"migrate",
"操作"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/cli.py#L83-L110
|
[
"def",
"migrate",
"(",
"migrations_root",
")",
":",
"from",
"flask_migrate",
"import",
"Migrate",
",",
"upgrade",
"as",
"migrate_upgrade",
"from",
"flask_sqlalchemy",
"import",
"SQLAlchemy",
"from",
"sqlalchemy",
".",
"engine",
".",
"url",
"import",
"make_url",
"from",
"sqlalchemy_utils",
"import",
"database_exists",
",",
"create_database",
"db",
"=",
"SQLAlchemy",
"(",
")",
"dsn",
"=",
"make_url",
"(",
"app",
".",
"config",
"[",
"'SQLALCHEMY_DATABASE_URI'",
"]",
")",
"if",
"not",
"database_exists",
"(",
"dsn",
")",
":",
"create_database",
"(",
"dsn",
")",
"pass",
"migrations_root",
"=",
"migrations_root",
"or",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'FANTASY_MIGRATION_PATH'",
",",
"os",
".",
"getcwd",
"(",
")",
")",
",",
"'migrations'",
")",
"migrations_root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"migrations_root",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"migrations_root",
")",
":",
"mig",
"=",
"Migrate",
"(",
"app",
",",
"db",
",",
"directory",
"=",
"migrations_root",
")",
"mig",
".",
"init_app",
"(",
"app",
",",
"db",
")",
"migrate_upgrade",
"(",
"migrations_root",
")",
"else",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'migration files not exist,skip migrate...'",
",",
"fg",
"=",
"'red'",
")",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
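The migrate record above (docstring: roughly "equivalent to Django's migrate operation") first creates the target database if it does not exist, then runs the Alembic upgrade. A minimal sketch of that bootstrap step using the same sqlalchemy_utils helpers; the connection URI is hypothetical and a reachable database server is assumed:

from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import database_exists, create_database

dsn = make_url('postgresql://user:pass@localhost/mydb')  # hypothetical URI
if not database_exists(dsn):
    create_database(dsn)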
test
|
requirements
|
编译全新依赖文件
|
fantasy/cli.py
|
def requirements(work_dir, hive_root, with_requirements,
with_dockerfile, active_module, active_module_file):
"""编译全新依赖文件"""
import sys
sys.path.insert(0, hive_root)
hive_root = os.path.abspath(os.path.expanduser(hive_root))
work_dir = work_dir or os.path.join(
os.environ.get('FANTASY_APP_PATH',
os.getcwd()))
work_dir = os.path.expanduser(work_dir)
requirements_root = os.path.join(work_dir, 'requirements')
migrate_root = os.path.join(work_dir, 'migrations')
# active_modules 严格按照顺序
active_module_paths = []
active_module_list = []
if active_module_file:
with open(active_module_file, 'r') as fp:
for l in fp:
pkg = l.split('#')[0].strip()
if pkg:
active_module_list.append(l.strip("\n"))
pass
active_module_list += active_module
for m in active_module_list:
try:
mod = importlib.import_module(m)
active_module_paths.append(os.path.dirname(mod.__file__))
except ImportError:
click.echo('module "%s" not found.' % m, color="yellow")
pass
pass
def build_requirements():
"""构造requirements文件
requirements文件共分为两份:
- hive.txt 从hive项目中直接复制
- hive-modules.txt 从指定的模块中装载依赖项
.. note::
requirements要求必须是顺序无关的
因为我们会使用set来去重,并按照value排序
"""
if not os.path.exists(requirements_root):
os.makedirs(requirements_root)
pass
click.echo(click.style("Generate hive requirements...", fg="yellow"))
shutil.copy(
os.path.join(hive_root, 'requirements.txt'),
os.path.join(requirements_root, 'hive.txt')
)
click.echo(click.style("Generate hive-module requirements...",
fg="yellow"))
requirements_files = []
for m in active_module_paths:
t = os.path.join(m, 'requirements.txt')
if os.path.exists(t):
requirements_files.append(t)
pass
module_packages = set()
with fileinput.input(requirements_files) as fp:
for line in fp:
pkg = line.split('#')[0].strip()
if pkg:
module_packages.add(pkg)
pass
with click.open_file(
os.path.join(requirements_root, 'hive-modules.txt'),
'w') as fp:
for p in module_packages:
fp.write("%s\n" % p)
pass
pass
def build_dockerfile():
"""构造Dockerfile"""
modules_in_hive = map(
lambda x: x.replace(hive_root, '').lstrip('/'),
filter(lambda x: x.startswith(hive_root),
active_module_paths))
modules_path = ' '.join(modules_in_hive)
docker_file = os.path.join(
os.path.dirname(requirements_root),
'Dockerfile'
)
# update Dockerfile
if os.path.exists(docker_file):
click.echo(click.style("Found Dockerfile,try update...",
fg="yellow"))
with open(docker_file, 'r') as fp:
buffer = fp.read()
pass
import re
replaced = re.sub('ARG HIVE_PACKAGES=".*"',
'ARG HIVE_PACKAGES="%s"' % modules_path,
buffer)
with open(docker_file, 'w') as fp:
fp.write(replaced)
pass
pass
pass
def build_migrations():
models_pairs = filter(
lambda pair: os.path.exists(pair[0]),
map(lambda x: (os.path.join(x[0], 'models.py'), x[1]),
[(v, active_module_list[i]) for i, v in
enumerate(active_module_paths)]))
try:
_, models = zip(*models_pairs)
except ValueError:
click.echo(click.style("No models found,"
"is it include in "
"your PYTHONPATH?\n"
"Modules: %s" %
','.join(active_module_list),
fg="yellow"))
return
click.echo(click.style("Found models.txt,try update...",
fg="yellow"))
with open(os.path.join(migrate_root, 'models.txt'), 'w') as fp:
for p in models:
fp.write("%s\n" % p)
pass
pass
def build_tasks():
tasks_pairs = filter(
lambda pair: os.path.exists(pair[0]),
map(lambda x: (os.path.join(x[0], 'tasks.py'), x[1]),
[(v, active_module_list[i]) for i, v in
enumerate(active_module_paths)]))
try:
_, tasks = zip(*tasks_pairs)
except ValueError:
click.echo(click.style("No tasks found,"
"is it include in "
"your PYTHONPATH?\n"
"Modules: %s" %
','.join(active_module_list),
fg="yellow"))
return
click.echo(click.style("Found tasks.txt,try update...",
fg="yellow"))
with open(os.path.join(migrate_root, 'tasks.txt'), 'w') as fp:
for p in tasks:
fp.write("%s\n" % p)
pass
if with_requirements:
build_requirements()
if with_dockerfile:
build_dockerfile()
if os.path.exists(migrate_root):
build_migrations()
if os.path.exists(migrate_root):
build_tasks()
click.echo(click.style("Generate done...", fg="yellow"))
pass
|
def requirements(work_dir, hive_root, with_requirements,
with_dockerfile, active_module, active_module_file):
"""编译全新依赖文件"""
import sys
sys.path.insert(0, hive_root)
hive_root = os.path.abspath(os.path.expanduser(hive_root))
work_dir = work_dir or os.path.join(
os.environ.get('FANTASY_APP_PATH',
os.getcwd()))
work_dir = os.path.expanduser(work_dir)
requirements_root = os.path.join(work_dir, 'requirements')
migrate_root = os.path.join(work_dir, 'migrations')
# active_modules 严格按照顺序
active_module_paths = []
active_module_list = []
if active_module_file:
with open(active_module_file, 'r') as fp:
for l in fp:
pkg = l.split('#')[0].strip()
if pkg:
active_module_list.append(l.strip("\n"))
pass
active_module_list += active_module
for m in active_module_list:
try:
mod = importlib.import_module(m)
active_module_paths.append(os.path.dirname(mod.__file__))
except ImportError:
click.echo('module "%s" not found.' % m, color="yellow")
pass
pass
def build_requirements():
"""构造requirements文件
requirements文件共分为两份:
- hive.txt 从hive项目中直接复制
- hive-modules.txt 从指定的模块中装载依赖项
.. note::
requirements要求必须是顺序无关的
因为我们会使用set来去重,并按照value排序
"""
if not os.path.exists(requirements_root):
os.makedirs(requirements_root)
pass
click.echo(click.style("Generate hive requirements...", fg="yellow"))
shutil.copy(
os.path.join(hive_root, 'requirements.txt'),
os.path.join(requirements_root, 'hive.txt')
)
click.echo(click.style("Generate hive-module requirements...",
fg="yellow"))
requirements_files = []
for m in active_module_paths:
t = os.path.join(m, 'requirements.txt')
if os.path.exists(t):
requirements_files.append(t)
pass
module_packages = set()
with fileinput.input(requirements_files) as fp:
for line in fp:
pkg = line.split('#')[0].strip()
if pkg:
module_packages.add(pkg)
pass
with click.open_file(
os.path.join(requirements_root, 'hive-modules.txt'),
'w') as fp:
for p in module_packages:
fp.write("%s\n" % p)
pass
pass
def build_dockerfile():
"""构造Dockerfile"""
modules_in_hive = map(
lambda x: x.replace(hive_root, '').lstrip('/'),
filter(lambda x: x.startswith(hive_root),
active_module_paths))
modules_path = ' '.join(modules_in_hive)
docker_file = os.path.join(
os.path.dirname(requirements_root),
'Dockerfile'
)
# update Dockerfile
if os.path.exists(docker_file):
click.echo(click.style("Found Dockerfile,try update...",
fg="yellow"))
with open(docker_file, 'r') as fp:
buffer = fp.read()
pass
import re
replaced = re.sub('ARG HIVE_PACKAGES=".*"',
'ARG HIVE_PACKAGES="%s"' % modules_path,
buffer)
with open(docker_file, 'w') as fp:
fp.write(replaced)
pass
pass
pass
def build_migrations():
models_pairs = filter(
lambda pair: os.path.exists(pair[0]),
map(lambda x: (os.path.join(x[0], 'models.py'), x[1]),
[(v, active_module_list[i]) for i, v in
enumerate(active_module_paths)]))
try:
_, models = zip(*models_pairs)
except ValueError:
click.echo(click.style("No models found,"
"is it include in "
"your PYTHONPATH?\n"
"Modules: %s" %
','.join(active_module_list),
fg="yellow"))
return
click.echo(click.style("Found models.txt,try update...",
fg="yellow"))
with open(os.path.join(migrate_root, 'models.txt'), 'w') as fp:
for p in models:
fp.write("%s\n" % p)
pass
pass
def build_tasks():
tasks_pairs = filter(
lambda pair: os.path.exists(pair[0]),
map(lambda x: (os.path.join(x[0], 'tasks.py'), x[1]),
[(v, active_module_list[i]) for i, v in
enumerate(active_module_paths)]))
try:
_, tasks = zip(*tasks_pairs)
except ValueError:
click.echo(click.style("No tasks found,"
"is it include in "
"your PYTHONPATH?\n"
"Modules: %s" %
','.join(active_module_list),
fg="yellow"))
return
click.echo(click.style("Found tasks.txt,try update...",
fg="yellow"))
with open(os.path.join(migrate_root, 'tasks.txt'), 'w') as fp:
for p in tasks:
fp.write("%s\n" % p)
pass
if with_requirements:
build_requirements()
if with_dockerfile:
build_dockerfile()
if os.path.exists(migrate_root):
build_migrations()
if os.path.exists(migrate_root):
build_tasks()
click.echo(click.style("Generate done...", fg="yellow"))
pass
|
[
"编译全新依赖文件"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/cli.py#L124-L314
|
[
"def",
"requirements",
"(",
"work_dir",
",",
"hive_root",
",",
"with_requirements",
",",
"with_dockerfile",
",",
"active_module",
",",
"active_module_file",
")",
":",
"import",
"sys",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"hive_root",
")",
"hive_root",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"hive_root",
")",
")",
"work_dir",
"=",
"work_dir",
"or",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'FANTASY_APP_PATH'",
",",
"os",
".",
"getcwd",
"(",
")",
")",
")",
"work_dir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"work_dir",
")",
"requirements_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'requirements'",
")",
"migrate_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'migrations'",
")",
"# active_modules 严格按照顺序",
"active_module_paths",
"=",
"[",
"]",
"active_module_list",
"=",
"[",
"]",
"if",
"active_module_file",
":",
"with",
"open",
"(",
"active_module_file",
",",
"'r'",
")",
"as",
"fp",
":",
"for",
"l",
"in",
"fp",
":",
"pkg",
"=",
"l",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"pkg",
":",
"active_module_list",
".",
"append",
"(",
"l",
".",
"strip",
"(",
"\"\\n\"",
")",
")",
"pass",
"active_module_list",
"+=",
"active_module",
"for",
"m",
"in",
"active_module_list",
":",
"try",
":",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"m",
")",
"active_module_paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"mod",
".",
"__file__",
")",
")",
"except",
"ImportError",
":",
"click",
".",
"echo",
"(",
"'module \"%s\" not found.'",
"%",
"m",
",",
"color",
"=",
"\"yellow\"",
")",
"pass",
"pass",
"def",
"build_requirements",
"(",
")",
":",
"\"\"\"构造requirements文件\n\n requirements文件共分为两份:\n\n - hive.txt 从hive项目中直接复制\n - hive-modules.txt 从指定的模块中装载依赖项\n\n .. note::\n requirements要求必须是顺序无关的\n 因为我们会使用set来去重,并按照value排序\n\n \"\"\"",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"requirements_root",
")",
":",
"os",
".",
"makedirs",
"(",
"requirements_root",
")",
"pass",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Generate hive requirements...\"",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"shutil",
".",
"copy",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hive_root",
",",
"'requirements.txt'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"requirements_root",
",",
"'hive.txt'",
")",
")",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Generate hive-module requirements...\"",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"requirements_files",
"=",
"[",
"]",
"for",
"m",
"in",
"active_module_paths",
":",
"t",
"=",
"os",
".",
"path",
".",
"join",
"(",
"m",
",",
"'requirements.txt'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"t",
")",
":",
"requirements_files",
".",
"append",
"(",
"t",
")",
"pass",
"module_packages",
"=",
"set",
"(",
")",
"with",
"fileinput",
".",
"input",
"(",
"requirements_files",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"pkg",
"=",
"line",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"pkg",
":",
"module_packages",
".",
"add",
"(",
"pkg",
")",
"pass",
"with",
"click",
".",
"open_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"requirements_root",
",",
"'hive-modules.txt'",
")",
",",
"'w'",
")",
"as",
"fp",
":",
"for",
"p",
"in",
"module_packages",
":",
"fp",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"p",
")",
"pass",
"pass",
"def",
"build_dockerfile",
"(",
")",
":",
"\"\"\"构造Dockerfile\"\"\"",
"modules_in_hive",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"replace",
"(",
"hive_root",
",",
"''",
")",
".",
"lstrip",
"(",
"'/'",
")",
",",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"startswith",
"(",
"hive_root",
")",
",",
"active_module_paths",
")",
")",
"modules_path",
"=",
"' '",
".",
"join",
"(",
"modules_in_hive",
")",
"docker_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"requirements_root",
")",
",",
"'Dockerfile'",
")",
"# update Dockerfile",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"docker_file",
")",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Found Dockerfile,try update...\"",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"with",
"open",
"(",
"docker_file",
",",
"'r'",
")",
"as",
"fp",
":",
"buffer",
"=",
"fp",
".",
"read",
"(",
")",
"pass",
"import",
"re",
"replaced",
"=",
"re",
".",
"sub",
"(",
"'ARG HIVE_PACKAGES=\".*\"'",
",",
"'ARG HIVE_PACKAGES=\"%s\"'",
"%",
"modules_path",
",",
"buffer",
")",
"with",
"open",
"(",
"docker_file",
",",
"'w'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"replaced",
")",
"pass",
"pass",
"pass",
"def",
"build_migrations",
"(",
")",
":",
"models_pairs",
"=",
"filter",
"(",
"lambda",
"pair",
":",
"os",
".",
"path",
".",
"exists",
"(",
"pair",
"[",
"0",
"]",
")",
",",
"map",
"(",
"lambda",
"x",
":",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'models.py'",
")",
",",
"x",
"[",
"1",
"]",
")",
",",
"[",
"(",
"v",
",",
"active_module_list",
"[",
"i",
"]",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"active_module_paths",
")",
"]",
")",
")",
"try",
":",
"_",
",",
"models",
"=",
"zip",
"(",
"*",
"models_pairs",
")",
"except",
"ValueError",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"No models found,\"",
"\"is it include in \"",
"\"your PYTHONPATH?\\n\"",
"\"Modules: %s\"",
"%",
"','",
".",
"join",
"(",
"active_module_list",
")",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"return",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Found models.txt,try update...\"",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"migrate_root",
",",
"'models.txt'",
")",
",",
"'w'",
")",
"as",
"fp",
":",
"for",
"p",
"in",
"models",
":",
"fp",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"p",
")",
"pass",
"pass",
"def",
"build_tasks",
"(",
")",
":",
"tasks_pairs",
"=",
"filter",
"(",
"lambda",
"pair",
":",
"os",
".",
"path",
".",
"exists",
"(",
"pair",
"[",
"0",
"]",
")",
",",
"map",
"(",
"lambda",
"x",
":",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'tasks.py'",
")",
",",
"x",
"[",
"1",
"]",
")",
",",
"[",
"(",
"v",
",",
"active_module_list",
"[",
"i",
"]",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"active_module_paths",
")",
"]",
")",
")",
"try",
":",
"_",
",",
"tasks",
"=",
"zip",
"(",
"*",
"tasks_pairs",
")",
"except",
"ValueError",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"No tasks found,\"",
"\"is it include in \"",
"\"your PYTHONPATH?\\n\"",
"\"Modules: %s\"",
"%",
"','",
".",
"join",
"(",
"active_module_list",
")",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"return",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Found tasks.txt,try update...\"",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"migrate_root",
",",
"'tasks.txt'",
")",
",",
"'w'",
")",
"as",
"fp",
":",
"for",
"p",
"in",
"tasks",
":",
"fp",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"p",
")",
"pass",
"if",
"with_requirements",
":",
"build_requirements",
"(",
")",
"if",
"with_dockerfile",
":",
"build_dockerfile",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"migrate_root",
")",
":",
"build_migrations",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"migrate_root",
")",
":",
"build_tasks",
"(",
")",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Generate done...\"",
",",
"fg",
"=",
"\"yellow\"",
")",
")",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
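Inside the requirements record above (docstring: roughly "compile fresh dependency files"), the build_dockerfile helper rewrites a single ARG line in an existing Dockerfile via re.sub. A minimal, self-contained sketch of that substitution; the Dockerfile text and module paths are hypothetical:

import re

buffer = 'FROM python:3.6\nARG HIVE_PACKAGES="old/module"\nRUN pip install .\n'
modules_path = 'shop accounts'  # hypothetical space-joined module paths inside hive_root
replaced = re.sub('ARG HIVE_PACKAGES=".*"',
                  'ARG HIVE_PACKAGES="%s"' % modules_path,
                  buffer)
assert 'ARG HIVE_PACKAGES="shop accounts"' in replaced  # only that line is rewritten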
test
|
queue
|
启动队列服务[开发中]
|
fantasy/cli.py
|
def queue(celery_arguments):
"""启动队列服务[开发中]"""
if not app.celery:
return click.echo(
click.style('No celery config found,skip start...', fg='yellow'))
celery = app.celery
celery.autodiscover_tasks()
argv = celery_arguments.split()
argv.insert(0, 'worker')
argv.insert(0, 'Queue')
celery.worker_main(argv)
pass
|
def queue(celery_arguments):
"""启动队列服务[开发中]"""
if not app.celery:
return click.echo(
click.style('No celery config found,skip start...', fg='yellow'))
celery = app.celery
celery.autodiscover_tasks()
argv = celery_arguments.split()
argv.insert(0, 'worker')
argv.insert(0, 'Queue')
celery.worker_main(argv)
pass
|
[
"启动队列服务",
"[",
"开发中",
"]"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/cli.py#L321-L335
|
[
"def",
"queue",
"(",
"celery_arguments",
")",
":",
"if",
"not",
"app",
".",
"celery",
":",
"return",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'No celery config found,skip start...', ",
"f",
"='",
"y",
"ellow'))",
"",
"",
"celery",
"=",
"app",
".",
"celery",
"celery",
".",
"autodiscover_tasks",
"(",
")",
"argv",
"=",
"celery_arguments",
".",
"split",
"(",
")",
"argv",
".",
"insert",
"(",
"0",
",",
"'worker'",
")",
"argv",
".",
"insert",
"(",
"0",
",",
"'Queue'",
")",
"celery",
".",
"worker_main",
"(",
"argv",
")",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
test
|
smart_database
|
尝试对数据库做初始化操作
|
fantasy/__init__.py
|
def smart_database(app):
"""尝试对数据库做初始化操作"""
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import database_exists, create_database
# 如果数据库不存在,则尝试创建数据
dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if not database_exists(dsn):
create_database(dsn)
pass
pass
|
def smart_database(app):
"""尝试对数据库做初始化操作"""
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import database_exists, create_database
# 如果数据库不存在,则尝试创建数据
dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if not database_exists(dsn):
create_database(dsn)
pass
pass
|
[
"尝试对数据库做初始化操作"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/__init__.py#L50-L61
|
[
"def",
"smart_database",
"(",
"app",
")",
":",
"from",
"sqlalchemy",
".",
"engine",
".",
"url",
"import",
"make_url",
"from",
"sqlalchemy_utils",
"import",
"database_exists",
",",
"create_database",
"# 如果数据库不存在,则尝试创建数据",
"dsn",
"=",
"make_url",
"(",
"app",
".",
"config",
"[",
"'SQLALCHEMY_DATABASE_URI'",
"]",
")",
"if",
"not",
"database_exists",
"(",
"dsn",
")",
":",
"create_database",
"(",
"dsn",
")",
"pass",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
test
|
smart_migrate
|
如果存在migration且指定为primary_node则执行migrate操作
|
fantasy/__init__.py
|
def smart_migrate(app, migrations_root):
"""如果存在migration且指定为primary_node则执行migrate操作"""
db = app.db
if os.path.exists(migrations_root) and \
os.environ['FANTASY_PRIMARY_NODE'] != 'no':
from flask_migrate import (Migrate,
upgrade as migrate_upgrade)
migrate = Migrate(app, db, directory=migrations_root)
migrate.init_app(app, db)
migrate_upgrade(migrations_root)
pass
pass
|
def smart_migrate(app, migrations_root):
"""如果存在migration且指定为primary_node则执行migrate操作"""
db = app.db
if os.path.exists(migrations_root) and \
os.environ['FANTASY_PRIMARY_NODE'] != 'no':
from flask_migrate import (Migrate,
upgrade as migrate_upgrade)
migrate = Migrate(app, db, directory=migrations_root)
migrate.init_app(app, db)
migrate_upgrade(migrations_root)
pass
pass
|
[
"如果存在migration且指定为primary_node则执行migrate操作"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/__init__.py#L64-L77
|
[
"def",
"smart_migrate",
"(",
"app",
",",
"migrations_root",
")",
":",
"db",
"=",
"app",
".",
"db",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"migrations_root",
")",
"and",
"os",
".",
"environ",
"[",
"'FANTASY_PRIMARY_NODE'",
"]",
"!=",
"'no'",
":",
"from",
"flask_migrate",
"import",
"(",
"Migrate",
",",
"upgrade",
"as",
"migrate_upgrade",
")",
"migrate",
"=",
"Migrate",
"(",
"app",
",",
"db",
",",
"directory",
"=",
"migrations_root",
")",
"migrate",
".",
"init_app",
"(",
"app",
",",
"db",
")",
"migrate_upgrade",
"(",
"migrations_root",
")",
"pass",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
test
|
smart_account
|
尝试使用内置方式构建账户
|
fantasy/__init__.py
|
def smart_account(app):
"""尝试使用内置方式构建账户"""
if os.environ['FANTASY_ACTIVE_ACCOUNT'] == 'no':
return
from flask_security import SQLAlchemyUserDatastore, Security
account_module_name, account_class_name = os.environ[
'FANTASY_ACCOUNT_MODEL'].rsplit('.', 1)
account_module = importlib.import_module(account_module_name)
account_class = getattr(account_module, account_class_name)
role_module_name, role_class_name = os.environ[
'FANTASY_ROLE_MODEL'].rsplit('.', 1)
role_module = importlib.import_module(role_module_name)
role_class = getattr(role_module, role_class_name)
r = True if os.environ[
'FANTASY_ACCOUNT_SECURITY_MODE'] != 'no' else False
Security(app,
SQLAlchemyUserDatastore(
app.db, account_class, role_class),
register_blueprint=r)
pass
|
def smart_account(app):
"""尝试使用内置方式构建账户"""
if os.environ['FANTASY_ACTIVE_ACCOUNT'] == 'no':
return
from flask_security import SQLAlchemyUserDatastore, Security
account_module_name, account_class_name = os.environ[
'FANTASY_ACCOUNT_MODEL'].rsplit('.', 1)
account_module = importlib.import_module(account_module_name)
account_class = getattr(account_module, account_class_name)
role_module_name, role_class_name = os.environ[
'FANTASY_ROLE_MODEL'].rsplit('.', 1)
role_module = importlib.import_module(role_module_name)
role_class = getattr(role_module, role_class_name)
r = True if os.environ[
'FANTASY_ACCOUNT_SECURITY_MODE'] != 'no' else False
Security(app,
SQLAlchemyUserDatastore(
app.db, account_class, role_class),
register_blueprint=r)
pass
|
[
"尝试使用内置方式构建账户"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/__init__.py#L80-L105
|
[
"def",
"smart_account",
"(",
"app",
")",
":",
"if",
"os",
".",
"environ",
"[",
"'FANTASY_ACTIVE_ACCOUNT'",
"]",
"==",
"'no'",
":",
"return",
"from",
"flask_security",
"import",
"SQLAlchemyUserDatastore",
",",
"Security",
"account_module_name",
",",
"account_class_name",
"=",
"os",
".",
"environ",
"[",
"'FANTASY_ACCOUNT_MODEL'",
"]",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"account_module",
"=",
"importlib",
".",
"import_module",
"(",
"account_module_name",
")",
"account_class",
"=",
"getattr",
"(",
"account_module",
",",
"account_class_name",
")",
"role_module_name",
",",
"role_class_name",
"=",
"os",
".",
"environ",
"[",
"'FANTASY_ROLE_MODEL'",
"]",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"role_module",
"=",
"importlib",
".",
"import_module",
"(",
"role_module_name",
")",
"role_class",
"=",
"getattr",
"(",
"role_module",
",",
"role_class_name",
")",
"r",
"=",
"True",
"if",
"os",
".",
"environ",
"[",
"'FANTASY_ACCOUNT_SECURITY_MODE'",
"]",
"!=",
"'no'",
"else",
"False",
"Security",
"(",
"app",
",",
"SQLAlchemyUserDatastore",
"(",
"app",
".",
"db",
",",
"account_class",
",",
"role_class",
")",
",",
"register_blueprint",
"=",
"r",
")",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
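The smart_account record above (docstring: roughly "try to set up accounts using the built-in mechanism") resolves its user and role classes from dotted paths stored in environment variables, splitting each path once from the right. A sketch of the expected configuration and the rsplit step; all names here are hypothetical:

import os

# Hypothetical environment configuration consumed by smart_account above
os.environ['FANTASY_ACTIVE_ACCOUNT'] = 'yes'
os.environ['FANTASY_ACCOUNT_MODEL'] = 'shop.models.User'
os.environ['FANTASY_ROLE_MODEL'] = 'shop.models.Role'
os.environ['FANTASY_ACCOUNT_SECURITY_MODE'] = 'no'  # skip registering the security blueprint

module_name, class_name = os.environ['FANTASY_ACCOUNT_MODEL'].rsplit('.', 1)
assert (module_name, class_name) == ('shop.models', 'User')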
test
|
load_tasks
|
装载任务,解决celery无法自动装载的问题
|
fantasy/__init__.py
|
def load_tasks(app, entry_file=None):
"""装载任务,解决celery无法自动装载的问题"""
from celery import Task
tasks_txt = os.path.join(os.path.dirname(entry_file), 'migrations',
'tasks.txt')
if not os.path.exists(tasks_txt):
import sys
print('Tasks file not found:%s' % tasks_txt)
sys.exit(-1)
class ContextTask(Task):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return super().__call__(*args, **kwargs)
app.celery.config_from_object(app.config, namespace='CELERY')
app.celery.Task = ContextTask
with app.app_context():
with open(tasks_txt, 'r') as f:
for line in f:
mod = line.strip('\n')
if mod:
importlib.import_module(mod + '.tasks')
pass
pass
pass
pass
|
def load_tasks(app, entry_file=None):
"""装载任务,解决celery无法自动装载的问题"""
from celery import Task
tasks_txt = os.path.join(os.path.dirname(entry_file), 'migrations',
'tasks.txt')
if not os.path.exists(tasks_txt):
import sys
print('Tasks file not found:%s' % tasks_txt)
sys.exit(-1)
class ContextTask(Task):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return super().__call__(*args, **kwargs)
app.celery.config_from_object(app.config, namespace='CELERY')
app.celery.Task = ContextTask
with app.app_context():
with open(tasks_txt, 'r') as f:
for line in f:
mod = line.strip('\n')
if mod:
importlib.import_module(mod + '.tasks')
pass
pass
pass
pass
|
[
"装载任务,解决celery无法自动装载的问题"
] |
wangwenpei/fantasy
|
python
|
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/__init__.py#L141-L171
|
[
"def",
"load_tasks",
"(",
"app",
",",
"entry_file",
"=",
"None",
")",
":",
"from",
"celery",
"import",
"Task",
"tasks_txt",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"entry_file",
")",
",",
"'migrations'",
",",
"'tasks.txt'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"tasks_txt",
")",
":",
"import",
"sys",
"print",
"(",
"'Tasks file not found:%s'",
"%",
"tasks_txt",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"class",
"ContextTask",
"(",
"Task",
")",
":",
"abstract",
"=",
"True",
"def",
"__call__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"app",
".",
"app_context",
"(",
")",
":",
"return",
"super",
"(",
")",
".",
"__call__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"app",
".",
"celery",
".",
"config_from_object",
"(",
"app",
".",
"config",
",",
"namespace",
"=",
"'CELERY'",
")",
"app",
".",
"celery",
".",
"Task",
"=",
"ContextTask",
"with",
"app",
".",
"app_context",
"(",
")",
":",
"with",
"open",
"(",
"tasks_txt",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"mod",
"=",
"line",
".",
"strip",
"(",
"'\\n'",
")",
"if",
"mod",
":",
"importlib",
".",
"import_module",
"(",
"mod",
"+",
"'.tasks'",
")",
"pass",
"pass",
"pass",
"pass"
] |
0fe92059bd868f14da84235beb05b217b1d46e4a
|
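The load_tasks record above (docstring: roughly "load tasks, working around Celery's inability to autodiscover them") reads module names from migrations/tasks.txt, one per line, and imports '<module>.tasks' for each inside the app context. A sketch of that parse with hypothetical file contents:

lines = ["shop\n", "\n", "accounts\n"]  # hypothetical tasks.txt contents
mods = [line.strip("\n") for line in lines if line.strip("\n")]
assert mods == ["shop", "accounts"]  # each entry is imported as '<mod>.tasks'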