INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Return list of sections including students for the passed course ID.
def get_sections_with_students_in_course(self, course_id, params=None):
    """
    Return list of sections including students for the passed course ID.

    :param course_id: Canvas course id.
    :param params: optional dict of query parameters; "students" is added
        to its "include" list. The caller's dict is not mutated.
    """
    # Copy to avoid mutating the caller's dict (the original used a shared
    # mutable default `params={}` and wrote into it).
    params = dict(params) if params else {}
    include = list(params.get("include", []))
    if "students" not in include:
        include.append("students")
    params["include"] = include
    return self.get_sections_in_course(course_id, params)
Return list of sections including students for the passed sis ID.
def get_sections_with_students_in_course_by_sis_id(self, sis_course_id, params=None):
    """
    Return list of sections including students for the passed SIS ID.

    :param sis_course_id: SIS id of the course.
    :param params: optional dict of query parameters, forwarded unchanged.
    """
    # Default to a fresh dict per call (avoids the shared mutable-default
    # pitfall of the original `params={}` signature).
    if params is None:
        params = {}
    return self.get_sections_with_students_in_course(
        self._sis_id(sis_course_id, sis_field="course"), params)
Create a canvas section in the given course id.
def create_section(self, course_id, name, sis_section_id):
    """
    Create a Canvas section in the given course.

    https://canvas.instructure.com/doc/api/sections.html#method.sections.create
    """
    section = {"name": name, "sis_section_id": sis_section_id}
    url = "{}/sections".format(COURSES_API.format(course_id))
    return CanvasSection(data=self._post_resource(url, {"course_section": section}))
Update a canvas section with the given section id.
def update_section(self, section_id, name, sis_section_id):
    """
    Update a Canvas section with the given section id.

    Only truthy name/sis_section_id values are sent.

    https://canvas.instructure.com/doc/api/sections.html#method.sections.update
    """
    course_section = {}
    if name:
        course_section["name"] = name
    if sis_section_id:
        course_section["sis_section_id"] = sis_section_id
    url = SECTIONS_API.format(section_id)
    return CanvasSection(
        data=self._put_resource(url, {"course_section": course_section}))
List quizzes for a given course
def get_quizzes(self, course_id):
    """
    List quizzes for a given course.

    https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes_api.index
    """
    url = QUIZZES_API.format(course_id)
    return [Quiz(data=item) for item in self._get_resource(url)]
Return account resource for given canvas account id.
def get_account(self, account_id):
    """
    Return the account resource for a given canvas account id.

    https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show
    """
    data = self._get_resource(ACCOUNTS_API.format(account_id))
    return CanvasAccount(data=data)
Return list of subaccounts within the account with the passed canvas id.
def get_sub_accounts(self, account_id, params=None):
    """
    Return list of subaccounts within the account with the passed canvas id.

    :param account_id: Canvas account id.
    :param params: optional dict of query parameters.

    https://canvas.instructure.com/doc/api/accounts.html#method.accounts.sub_accounts
    """
    # Avoid the shared mutable-default pitfall of the original `params={}`.
    if params is None:
        params = {}
    url = ACCOUNTS_API.format(account_id) + "/sub_accounts"
    return [CanvasAccount(data=item)
            for item in self._get_paged_resource(url, params=params)]
Update the passed account. Returns the updated account.
def update_account(self, account):
    """
    Update the passed account and return the updated account.

    https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
    """
    body = {"account": {"name": account.name}}
    url = ACCOUNTS_API.format(account.account_id)
    return CanvasAccount(data=self._put_resource(url, body))
Updates the SIS ID for the account identified by the passed account ID.
def update_sis_id(self, account_id, sis_account_id):
    """
    Update the SIS ID for the account identified by the passed account ID.

    Raises for the root account, whose SIS ID must not change.

    https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
    """
    if account_id == self._canvas_account_id:
        raise Exception("SIS ID cannot be updated for the root account")
    body = {"account": {"sis_account_id": sis_account_id}}
    return CanvasAccount(
        data=self._put_resource(ACCOUNTS_API.format(account_id), body))
Return the authentication settings for the passed account_id.
def get_auth_settings(self, account_id):
    """
    Return the authentication settings for the passed account_id.

    https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.show_sso_settings
    """
    sso_url = ACCOUNTS_API.format(account_id) + "/sso_settings"
    return CanvasSSOSettings(data=self._get_resource(sso_url))
Update the authentication settings for the passed account_id.
def update_auth_settings(self, account_id, auth_settings):
    """
    Update the authentication settings for the passed account_id.

    https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.update_sso_settings
    """
    sso_url = ACCOUNTS_API.format(account_id) + "/sso_settings"
    updated = self._put_resource(sso_url, auth_settings.json_data())
    return CanvasSSOSettings(data=updated)
Calculates the settlement of a shallow foundation ( Schmertmann 19XX ).
def settlement_schmertmann(sp, fd, load, youngs_modulus_soil, **kwargs):
    """
    Calculate the settlement of a shallow foundation (Schmertmann's method).

    :param sp: Soil Profile object (unit weights, ground-water level).
    :param fd: Foundation object (length, width, depth).
    :param load: total vertical load on the foundation.
    :param youngs_modulus_soil: The Young's modulus of the soil.
    :param kwargs: gwl, unit_sat_weight, verbose, years.
    :return: float, the settlement.
    """
    fd_length = float(fd.length)
    fd_width = float(fd.width)
    fd_depth = float(fd.depth)
    load = float(load)
    sp.gwl = kwargs.get("gwl", sp.gwl)
    sp.unit_sat_weight = kwargs.get("unit_sat_weight", sp.unit_sat_weight)
    verbose = kwargs.get("verbose", 0)
    years = kwargs.get("years", 0)

    # Applied bearing pressure and net pressure increase at founding depth.
    q = load / (fd_length * fd_width)
    sigma_v0_eff = (sp.unit_dry_weight * min(fd_depth, sp.gwl) +
                    (sp.unit_sat_weight - 9.8) * max([0, fd_depth - sp.gwl]))
    delta_q = q - sigma_v0_eff

    # Embedment factor (floored at 0.5).
    c_1 = max(1 - 0.5 * (sigma_v0_eff / delta_q), 0.5)

    # Creep factor (1.0 when no creep period is requested).
    if years == 0:
        c_2 = 1.0
    else:
        c_2 = 1.0 + 0.2 * np.log10(years / 0.1)

    # Shape factor (floored at 0.73).
    len_long = max(fd_length, fd_width)
    len_short = min(fd_length, fd_width)
    c_3 = max(1.03 - 0.03 * (len_long / len_short), 0.73)

    # Strain-influence geometry: strip-like footings (L/B > 10) use a
    # deeper influence zone than square-ish footings.
    if len_long / len_short > 10:
        z_top = 0.2
        zp = len_short + fd_depth
        z_bottom = 4 * len_short + fd_depth
    else:
        z_top = 0.1
        zp = 0.5 * len_short + fd_depth
        z_bottom = 2 * len_short + fd_depth

    # Effective stress at the depth of peak strain influence.
    sigma_vp_eff = (sp.unit_dry_weight * min(zp, sp.gwl) +
                    (sp.unit_sat_weight - 9.8) * max([0, zp - sp.gwl]))
    i_zp = 0.5 + 0.1 * (delta_q / sigma_vp_eff) ** 0.5
    i_z_top = (i_zp + z_top) / 2
    i_z_bottom = i_zp / 2

    settlement = (c_1 * c_2 * c_3 * delta_q *
                  (i_z_top * (zp - fd_depth) + i_z_bottom * (z_bottom - zp)) /
                  youngs_modulus_soil)
    if verbose:
        log("delta_q:", delta_q)
        log("c_1:", c_1)
        log("c_2:", c_2)
        log("c_3:", c_3)
        log("zp:", zp)
        log("sigma_vp_eff:", sigma_vp_eff)
        log("i_zp:", i_zp)
        log("i_z_top:", i_z_top)
        log("i_z_bottom:", i_z_bottom)
        log("settlement:", settlement)
    return settlement
Return all of the terms in the account. https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
def get_all_terms(self):
    """
    Return all of the terms in the account.

    https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
    """
    if not self._canvas_account_id:
        raise MissingAccountID()
    url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms"
    params = {"workflow_state": 'all', 'per_page': 500}
    data_key = 'enrollment_terms'
    response = self._get_paged_resource(url, params, data_key)
    return [CanvasTerm(data=item) for item in response[data_key]]
Return a term resource for the passed SIS ID.
def get_term_by_sis_id(self, sis_term_id):
    """
    Return the term resource matching the passed SIS ID, or None if no
    term matches.
    """
    return next(
        (term for term in self.get_all_terms()
         if term.sis_term_id == sis_term_id),
        None)
Update an existing enrollment term for the passed SIS ID. https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms.update
def update_term_overrides(self, sis_term_id, overrides=None):
    """
    Update an existing enrollment term for the passed SIS ID.

    :param sis_term_id: SIS id of the term to update.
    :param overrides: optional dict of per-enrollment-type date overrides.

    https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms.update
    """
    if not self._canvas_account_id:
        raise MissingAccountID()
    # Avoid the shared mutable-default pitfall of the original `overrides={}`.
    if overrides is None:
        overrides = {}
    url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms/{}".format(
        self._sis_id(sis_term_id, sis_field='term'))
    body = {'enrollment_term': {'overrides': overrides}}
    return CanvasTerm(data=self._put_resource(url, body))
Produces console output. :param out_str: Output string. :param o2: Additional output string. :param o3: Additional output string. :param o4: Additional output string. :return: None
def log(out_str, o2="", o3="", o4=""):
    """
    Print up to four values to stdout, separated by spaces.

    :param out_str: Output string
    :param o2: Additional output string
    :param o3: Additional output string
    :param o4: Additional output string
    :return: None
    """
    parts = (out_str, o2, o3, o4)
    print(*parts)
Imports a CSV string.
def import_str(self, csv, params=None):
    """
    Import a CSV string.

    :param csv: CSV payload as a string.
    :param params: optional dict of query parameters; not mutated.

    https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
    """
    if not self._canvas_account_id:
        raise MissingAccountID()
    # Copy to avoid mutating the caller's dict (the original used a shared
    # mutable default `params={}` and wrote into it).
    params = dict(params) if params else {}
    params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
    url = SIS_IMPORTS_API.format(self._canvas_account_id) + ".json{}".format(
        self._params(params))
    headers = {"Content-Type": "text/csv"}
    return SISImportModel(data=self._post_resource(url, headers, csv))
Imports a directory of CSV files.
def import_dir(self, dir_path, params=None):
    """
    Import a directory of CSV files as a zip archive.

    :param dir_path: directory containing the CSV files.
    :param params: optional dict of query parameters; not mutated.

    https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
    """
    if not self._canvas_account_id:
        raise MissingAccountID()
    body = self._build_archive(dir_path)
    # Copy to avoid mutating the caller's dict (the original used a shared
    # mutable default `params={}` and wrote into it).
    params = dict(params) if params else {}
    params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
    url = SIS_IMPORTS_API.format(self._canvas_account_id) + ".json{}".format(
        self._params(params))
    headers = {"Content-Type": "application/zip"}
    return SISImportModel(data=self._post_resource(url, headers, body))
Get the status of an already created SIS import.
def get_import_status(self, sis_import):
    """
    Get the status of an already created SIS import.

    https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show
    """
    if not self._canvas_account_id:
        raise MissingAccountID()
    url = "{}/{}.json".format(
        SIS_IMPORTS_API.format(self._canvas_account_id),
        sis_import.import_id)
    return SISImportModel(data=self._get_resource(url))
Creates a zip archive from files in path.
def _build_archive(self, dir_path):
    """
    Create a zip archive from the known CSV files in dir_path and return
    its bytes.

    :param dir_path: directory to scan for the CSV_FILES file names.
    :return: bytes of the written zip archive.
    """
    zip_path = os.path.join(dir_path, "import.zip")
    # Context manager guarantees the archive is finalized/closed even if
    # writing a member raises (the original leaked the handle on error).
    with zipfile.ZipFile(zip_path, "w") as archive:
        for filename in CSV_FILES:
            filepath = os.path.join(dir_path, filename)
            if os.path.exists(filepath):
                archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
    with open(zip_path, "rb") as f:
        return f.read()
List assignments for a given course
def get_assignments(self, course_id):
    """
    List assignments for a given course.

    https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index
    """
    url = ASSIGNMENTS_API.format(course_id)
    return [Assignment(data=item) for item in self._get_resource(url)]
Modify an existing assignment.
def update_assignment(self, assignment):
    """
    Modify an existing assignment.

    https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update
    """
    url = "{}/{}".format(
        ASSIGNMENTS_API.format(assignment.course_id),
        assignment.assignment_id)
    return Assignment(data=self._put_resource(url, assignment.json_data()))
Returns the list of reports for the canvas account id.
def get_available_reports(self, account_id):
    """
    Return the list of report types available for the canvas account id.

    https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.available_reports
    """
    url = ACCOUNTS_API.format(account_id) + "/reports"
    return [ReportType(data=item, account_id=account_id)
            for item in self._get_resource(url)]
Shows all reports of the passed report_type that have been run for the canvas account id.
def get_reports_by_type(self, account_id, report_type):
    """
    Show all reports of the passed report_type that have been run for the
    canvas account id.

    https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
    """
    url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
        report_type)

    def _to_report(item):
        # Each report record is tagged with its owning account id.
        item["account_id"] = account_id
        return Report(data=item)

    return [_to_report(item) for item in self._get_resource(url)]
Generates a report instance for the canvas account id.
def create_report(self, report_type, account_id, term_id=None, params=None):
    """
    Generate a report instance for the canvas account id.

    :param report_type: the report type string.
    :param account_id: Canvas account id.
    :param term_id: optional enrollment term to restrict the report to.
    :param params: optional dict of report parameters; not mutated.

    https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
    """
    # Copy to avoid mutating the caller's dict (the original used a shared
    # mutable default `params={}` and wrote into it).
    params = dict(params) if params else {}
    if term_id is not None:
        params["enrollment_term_id"] = term_id
    url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
        report_type)
    body = {"parameters": params}
    data = self._post_resource(url, body)
    data["account_id"] = account_id
    return Report(data=data)
Convenience method for create_report for creating a course provisioning report.
def create_course_provisioning_report(self, account_id, term_id=None, params=None):
    """
    Convenience wrapper around create_report for creating a course
    provisioning report.

    :param params: optional dict of report parameters; not mutated.
    """
    # Copy to avoid mutating the caller's dict (the original used a shared
    # mutable default `params={}` and wrote into it).
    params = dict(params) if params else {}
    params["courses"] = True
    return self.create_report(ReportType.PROVISIONING, account_id, term_id, params)
Convenience method for create_report for creating a course sis export report.
def create_course_sis_export_report(self, account_id, term_id=None, params=None):
    """
    Convenience wrapper around create_report for creating a course SIS
    export report.

    :param params: optional dict of report parameters; not mutated.
    """
    # Copy to avoid mutating the caller's dict (the original used a shared
    # mutable default `params={}` and wrote into it).
    params = dict(params) if params else {}
    params["courses"] = True
    return self.create_report(ReportType.SIS_EXPORT, account_id, term_id, params)
Convenience method for create_report for creating an unused courses report.
def create_unused_courses_report(self, account_id, term_id=None):
    """
    Convenience wrapper around create_report for creating an unused
    courses report.
    """
    return self.create_report(
        ReportType.UNUSED_COURSES, account_id, term_id)
Returns a completed report as a list of csv strings.
def get_report_data(self, report):
    """
    Return a completed report as a list of csv strings.

    Polls the report status until it completes; raises
    ReportFailureException if the report is invalid or errors out.
    """
    if report.report_id is None or report.status is None:
        raise ReportFailureException(report)
    interval = getattr(settings, 'CANVAS_REPORT_POLLING_INTERVAL', 5)
    while report.status != "complete":
        if report.status == "error":
            raise ReportFailureException(report)
        sleep(interval)
        report = self.get_report_status(report)
    attachment = report.attachment
    if attachment is None or attachment.url is None:
        return None
    return self._get_report_file(attachment.url).split("\n")
Returns the status of a report.
def get_report_status(self, report):
    """
    Return the current status of a report.

    https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
    """
    if any(value is None for value in (
            report.account_id, report.type, report.report_id)):
        raise ReportFailureException(report)
    url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
        report.type, report.report_id)
    data = self._get_resource(url)
    data["account_id"] = report.account_id
    return Report(data=data)
Deletes a generated report instance.
def delete_report(self, report):
    """
    Delete a generated report instance. Returns True on success.

    https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
    """
    url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
        report.type, report.report_id)
    # The response body is unused; HTTP failures are raised inside
    # _delete_resource (the original bound it to an unused local).
    self._delete_resource(url)
    return True
Crop an image given the top left corner. :param img: The image. :param start_y: The top left corner y coord. :param start_x: The top left corner x coord. :param h: The result height. :param w: The result width. :return: The cropped image.
def crop_image(img, start_y, start_x, h, w):
    """
    Crop an image given its top-left corner and target size.

    :param img: The image (H x W x C array).
    :param start_y: The top left corner y coord.
    :param start_x: The top left corner x coord.
    :param h: The result height.
    :param w: The result width.
    :return: The cropped image (a copy, detached from the input).
    """
    end_y = start_y + h
    end_x = start_x + w
    return img[start_y:end_y, start_x:end_x, :].copy()
Move detections in direction dx dy.
def move_detections(label, dy, dx):
    """
    Shift every detection in the label dict by the given deltas.

    :param label: The label dict containing all detection lists.
    :param dy: The delta in y direction as a number.
    :param dx: The delta in x direction as a number.
    :return: None (detections are moved in place).
    """
    detection_keys = [key for key in label if key.startswith("detection")]
    for key in detection_keys:
        for det in label[key]:
            # Note the sign flip: the image content moves opposite to the
            # requested label delta.
            det.move_image(-dx, -dy)
Horizontally flip detections according to an image flip.
def hflip_detections(label, w):
    """
    Mirror all detections horizontally to match a horizontal image flip.

    :param label: The label dict containing all detection lists.
    :param w: The width of the image as a number.
    :return: None (detections are mirrored in place).
    """
    for key, detections in label.items():
        if not key.startswith("detection"):
            continue
        # Oriented 2.5d boxes also need their heading angle mirrored.
        mirror_theta = (key == "detections_2.5d")
        for det in detections:
            det.cx = w - det.cx
            if mirror_theta:
                det.theta = math.pi - det.theta
Augment the detection dataset.
def augment_detections(hyper_params, feature, label):
    """
    Augment the detection dataset.

    In your hyper_parameters.problem.augmentation add configurations to
    enable features. Supports "enable_horizontal_flip",
    "enable_micro_translation", "random_crop": {"shape": {"width", "height"}}
    and "enable_texture_augmentation". Make sure to also set the "steps"
    otherwise this method will not be used properly. Random crop ensures at
    least one detection is in the crop region.

    Sample configuration::

        "problem": {
          "augmentation": {
            "steps": 40,
            "enable_texture_augmentation": true,
            "enable_micro_translation": true,
            "enable_horizontal_flip": true,
            "random_crop": {
              "shape": {
                "width": 256,
                "height": 256
              }
            }
          }
        }

    :param hyper_params: The hyper parameters object
    :param feature: A dict containing all features, must be in the style
        created by detection datasets.
    :param label: A label dict in the detection dataset style.
    :return: Modified feature and label dict (copied & modified).
    """
    # Do not augment these ways:
    # 1) Rotation is not possible
    # 3) Scaling is not possible, because it ruins depth perception
    # However, random crops can improve performance. (Training speed and accuracy)
    if hyper_params.problem.get("augmentation", None) is None:
        return feature, label
    img_h, img_w, img_c = feature["image"].shape
    # Work on copies so the caller's feature/label dicts stay untouched.
    augmented_feature = {}
    augmented_label = {}
    augmented_feature["image"] = feature["image"].copy()
    if "depth" in feature:
        augmented_feature["depth"] = feature["depth"].copy()
    if "calibration" in feature:
        augmented_feature["calibration"] = feature["calibration"]
    # Bookkeeping outputs: flip flag plus the cumulative crop offset.
    # NOTE(review): int8 limits the recorded offsets to +/-127 pixels —
    # confirm crops never exceed that range.
    augmented_feature["hflipped"] = np.array([0], dtype=np.uint8)
    augmented_feature["crop_offset"] = np.array([0, 0], dtype=np.int8)
    for k in label.keys():
        augmented_label[k] = [detection.copy() for detection in label[k]]
    # Horizontal flip applied with probability 0.5.
    if hyper_params.problem.augmentation.get("enable_horizontal_flip", False):
        if random.random() < 0.5:
            img_h, img_w, img_c = augmented_feature["image"].shape
            augmented_feature["image"] = np.fliplr(augmented_feature["image"])
            if "depth" in feature:
                augmented_feature["depth"] = np.fliplr(augmented_feature["depth"])
            augmented_feature["hflipped"][0] = 1
            hflip_detections(augmented_label, img_w)
    # Micro translation: shave 0-2 random pixels off the top-left edge.
    if hyper_params.problem.augmentation.get("enable_micro_translation", False):
        img_h, img_w, img_c = augmented_feature["image"].shape
        dx = int(random.random() * 3)
        dy = int(random.random() * 3)
        augmented_feature["image"] = crop_image(augmented_feature["image"], dy, dx, img_h - dy, img_w - dx)
        if "depth" in feature:
            augmented_feature["depth"] = crop_image(augmented_feature["depth"], dy, dx, img_h - dy, img_w - dx)
        augmented_feature["crop_offset"][0] += dy
        augmented_feature["crop_offset"][1] += dx
        move_detections(augmented_label, -dy, -dx)
    # Random crop to a fixed target shape, anchored near a detection.
    if hyper_params.problem.augmentation.get("random_crop", None) is not None:
        img_h, img_w, img_c = augmented_feature["image"].shape
        target_w = hyper_params.problem.augmentation.random_crop.shape.width
        target_h = hyper_params.problem.augmentation.random_crop.shape.height
        # Pad when the image is smaller than the requested crop size.
        delta_x = max(int(math.ceil((target_w + 1 - img_w) / 2)), 0)
        delta_y = max(int(math.ceil((target_h + 1 - img_h) / 2)), 0)
        move_detections(augmented_label, delta_y, delta_x)
        augmented_feature["image"] = cv2.copyMakeBorder(augmented_feature["image"], delta_y, delta_y, delta_x, delta_x, cv2.BORDER_CONSTANT)
        img_h, img_w, img_c = augmented_feature["image"].shape
        start_x = 0
        start_y = 0
        if len(augmented_label["detections_2d"]) != 0:
            # Pick a random detection and jitter the crop window around it.
            idx = random.randint(0, len(augmented_label["detections_2d"]) - 1)
            detection = augmented_label["detections_2d"][idx]
            start_x = int(detection.cx - random.random() * (target_w - 20) / 2.0 - 10)
            start_y = int(detection.cy - random.random() * (target_h - 20) / 2.0 - 10)
        else:
            # No detections: any position inside the image will do.
            start_x = int(img_w * random.random())
            start_y = int(img_h * random.random())
        # Compute start point so that crop fit's into image and random crop contains detection
        if start_x < 0:
            start_x = 0
        if start_y < 0:
            start_y = 0
        if start_x >= img_w - target_w:
            start_x = img_w - target_w - 1
        if start_y >= img_h - target_h:
            start_y = img_h - target_h - 1
        # Crop image
        augmented_feature["image"] = crop_image(augmented_feature["image"], start_y, start_x, target_h, target_w)
        if "depth" in feature:
            augmented_feature["depth"] = crop_image(augmented_feature["depth"], start_y, start_x, target_h, target_w)
        augmented_feature["crop_offset"][0] += start_y
        augmented_feature["crop_offset"][1] += start_x
        # Crop labels
        move_detections(augmented_label, -start_y, -start_x)
    # Texture augmentation applied with probability 0.5.
    if hyper_params.problem.augmentation.get("enable_texture_augmentation", False):
        if random.random() < 0.5:
            augmented_feature["image"] = full_texture_augmentation(augmented_feature["image"])
    return augmented_feature, augmented_label
Edit to get the dict even when the object is a GenericRelatedObjectManager. Added the try except.
def get_dict_from_obj(obj):
    '''
    Build a plain dict from a Django model instance: rename "<name>_id"
    keys to "<name>" for ForeignKey fields and append many-to-many fields
    as lists of pks.

    Edit to get the dict even when the object is a
    GenericRelatedObjectManager. Added the try except.
    '''
    obj_dict = obj.__dict__
    obj_dict_result = obj_dict.copy()
    for key, value in obj_dict.items():
        if key.endswith('_id'):
            # Candidate field name with the "_id" suffix removed.
            key2 = key.replace('_id', '')
            try:
                # NOTE(review): Meta.get_field_by_name was removed in Django
                # 1.10 — this targets a legacy Django version; confirm.
                field, model, direct, m2m = obj._meta.get_field_by_name(key2)
                if isinstance(field, ForeignKey):
                    # Expose the FK under its field name, not "<name>_id".
                    obj_dict_result[key2] = obj_dict_result[key]
                    del obj_dict_result[key]
            except FieldDoesNotExist:
                # A "*_id" attribute that is not an FK column: keep as-is.
                pass
    manytomany_list = obj._meta.many_to_many
    for manytomany in manytomany_list:
        # Collect related pks; only add the key when the relation is non-empty.
        pks = [obj_rel.pk for obj_rel in manytomany.value_from_object(obj).select_related()]
        if pks:
            obj_dict_result[manytomany.name] = pks
    return obj_dict_result
Get the arguments given to the template tag element and complete these with the ones from the settings. py if necessary.
def get_config(self, request, **kwargs):
    """
    Build the inplace-edit config: the template-tag kwargs, completed with
    the defaults from settings.py when required.
    """
    config = kwargs
    defaults = deepcopy(inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS)
    one_by_one = inplace_settings.DEFAULT_INPLACE_EDIT_OPTIONS_ONE_BY_ONE
    if one_by_one:
        # Merge key by key: tag-level kwargs override the defaults.
        config = dict(defaults, **config)
    elif not config and defaults:
        # Fall back to the defaults only when the tag gave no options.
        config = defaults
    return config
Get the text to display when the field is empty.
def empty_value(self):
    '''
    Return the text to display when the field is empty: the configured
    'edit_empty_value' if set, otherwise the global default from settings.
    '''
    configured = self.config.get('edit_empty_value', False)
    if configured:
        return configured
    return unicode(inplace_settings.INPLACEEDIT_EDIT_EMPTY_VALUE)
Usage: {% eval %} 1 + 1 {% endeval %}
def do_eval(parser, token):
    "Usage: {% eval %}1 + 1{% endeval %}"
    # Capture everything between {% eval %} and {% endeval %} as a nodelist.
    nodelist = parser.parse(('endeval',))
    class EvalNode(template.Node):
        def render(self, context):
            # Render the captured contents, then compile and render the
            # result as a template itself (double evaluation).
            return template.Template(nodelist.render(context)).render(template.Context(context))
    # Consume the {% endeval %} token itself.
    parser.delete_first_token()
    return EvalNode()
Parse uniformly args and kwargs from a templatetag
def parse_args_kwargs(parser, token):
    """
    Parse uniformly args and kwargs from a templatetag

    Usage::

        For parsing a template like this:

        {% footag my_contents,height=10,zoom=20 as myvar %}

        You simply do this:

        @register.tag
        def footag(parser, token):
            args, kwargs = parse_args_kwargs(parser, token)
    """
    bits = token.contents.split(' ')
    if len(bits) <= 1:
        raise template.TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
    # NOTE(review): the hard-coded offsets 13/14 assume the tag name plus
    # its trailing space occupy exactly 13 characters of token.contents —
    # this only works for a tag name of that exact length; verify callers.
    if token.contents[13] == '"':
        # Quoted first argument: everything up to and including the closing quote.
        end_quote = token.contents.index('"', 14) + 1
        args = [template.Variable(token.contents[13:end_quote])]
        kwargs_start = end_quote
    else:
        try:
            next_space = token.contents.index(' ', 14)
            kwargs_start = next_space + 1
        except ValueError:
            # No space after the argument: it runs to the end of the token.
            next_space = None
            kwargs_start = None
        args = [template.Variable(token.contents[13:next_space])]
    kwargs = {}
    # Remaining comma-separated "key=value" items become kwargs.
    kwargs_list = token.contents[kwargs_start:].split(',')
    for kwargs_item in kwargs_list:
        if '=' in kwargs_item:
            k, v = kwargs_item.split('=', 1)
            k = k.strip()
            kwargs[k] = template.Variable(v)
    return args, kwargs
Create and register metrics from a list of MetricConfigs.
def create_metrics(
        self, metric_configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
    """Create and register metrics from an iterable of MetricConfigs."""
    registry = self.registry
    return registry.create_metrics(metric_configs)
Setup logging for the application and aiohttp.
def _setup_logging(self, log_level: str):
    """Configure stderr logging for this application and aiohttp loggers."""
    level = getattr(logging, log_level)
    aiohttp_names = (
        'aiohttp.access', 'aiohttp.internal', 'aiohttp.server',
        'aiohttp.web')
    for name in aiohttp_names + (self.name,):
        setup_logger(name=name, stream=sys.stderr, level=level)
Configure the MetricRegistry.
def _configure_registry(self, include_process_stats: bool = False):
    """Configure the MetricRegistry, optionally adding process stats."""
    if not include_process_stats:
        return
    collector = ProcessCollector(registry=None)
    self.registry.register_additional_collector(collector)
Return a: class: PrometheusExporter configured with args.
def _get_exporter(self, args: argparse.Namespace) -> PrometheusExporter:
    """Build a PrometheusExporter configured from the parsed arguments."""
    exporter = PrometheusExporter(
        self.name, self.description, args.host, args.port, self.registry)
    app = exporter.app
    # Wire application lifecycle hooks into the exporter's web app.
    app.on_startup.append(self.on_application_startup)
    app.on_shutdown.append(self.on_application_shutdown)
    return exporter
Create Prometheus metrics from a list of MetricConfigs.
def create_metrics(self, configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
    """Create Prometheus metrics from an iterable of MetricConfigs."""
    created: Dict[str, Metric] = {}
    for config in configs:
        created[config.name] = self._register_metric(config)
    # Track the new metrics alongside any previously created ones.
    self._metrics.update(created)
    return created
Return a metric optionally configured with labels.
def get_metric(
        self, name: str,
        labels: Union[Dict[str, str], None] = None) -> Metric:
    """Return a metric by name, with labels applied when provided."""
    metric = self._metrics[name]
    if not labels:
        return metric
    return metric.labels(**labels)
Run the: class: aiohttp. web. Application for the exporter.
def run(self):
    """Run the aiohttp web application serving the exporter."""
    # Suppress aiohttp's startup banner by handing it a no-op print.
    def _silent(*args, **kargs):
        return None
    run_app(
        self.app, host=self.host, port=self.port, print=_silent,
        access_log_format='%a "%r" %s %b "%{Referrer}i" "%{User-Agent}i"')
Setup an: class: aiohttp. web. Application.
def _make_application(self) -> Application:
    """Create the aiohttp Application with its routes and startup hook."""
    app = Application()
    app['exporter'] = self
    routes = (('/', self._handle_home), ('/metrics', self._handle_metrics))
    for path, handler in routes:
        app.router.add_get(path, handler)
    app.on_startup.append(self._log_startup_message)
    return app
Home page request handler.
async def _handle_home(self, request: Request) -> Response:
    """Home page request handler: renders a minimal HTML landing page."""
    # Include the description in the title when one was provided.
    if self.description:
        title = f'{self.name} - {self.description}'
    else:
        title = self.name
    text = dedent(
        f'''<!DOCTYPE html>
        <html>
        <head>
        <title>{title}</title>
        </head>
        <body>
        <h1>{title}</h1>
        <p>
        Metric are exported at the
        <a href="/metrics">/metrics</a>
        endpoint.
        </p>
        </body>
        </html>
        ''')
    return Response(content_type='text/html', text=text)
Handler for metrics.
async def _handle_metrics(self, request: Request) -> Response:
    """Serve current metrics, refreshing them via the update handler if set."""
    handler = self._update_handler
    if handler:
        # Let the application refresh metric values before rendering.
        await handler(self.registry.get_metrics())
    response = Response(body=self.registry.generate_metrics())
    response.content_type = CONTENT_TYPE_LATEST
    return response
A free - text query resolver by Wolfram|Alpha. Returns the first result if available.
def wa(client, event, channel, nick, rest):
    """
    A free-text query resolver by Wolfram|Alpha. Returns the first result,
    if available.
    """
    # NOTE(review): the bot-framework `client` parameter is deliberately
    # shadowed by a fresh Wolfram|Alpha client built from the configured key.
    client = wolframalpha.Client(pmxbot.config['Wolfram|Alpha API key'])
    res = client.query(rest)
    # next() raises StopIteration when the query produced no results.
    return next(res.results).text
Python 2 uses a deprecated method signature and doesn't provide the forward compatibility. Add it.
def fix_HTTPMessage():
    """
    Python 2 uses a deprecated method signature and doesn't provide the
    forward compatibility. Add the Python 3 style accessor names.
    """
    if six.PY3:
        # Python 3 already has the modern accessors; nothing to patch.
        return
    message_cls = http_client.HTTPMessage
    message_cls.get_content_type = message_cls.gettype
    message_cls.get_param = message_cls.getparam
Query Wolfram|Alpha using the v2. 0 API
def query(self, input, params=(), **kwargs):
    """
    Query Wolfram|Alpha using the v2.0 API.

    Arbitrary parameters may be passed as keyword arguments or, for
    repeated keys (e.g. multiple assumptions), via ``params`` as a
    sequence of (key, value) pairs, for example::

        params = (
            ('assumption', '*C.pi-_*NamedConstant-'),
            ('assumption', 'DateOrder_**Day.Month.Year--'),
        )
        client.query(input='pi', params=params)

    See https://products.wolframalpha.com/api/documentation.html#6
    """
    base = dict(
        input=input,
        appid=self.app_id,
    )
    merged = tuple(itertools.chain(params, base.items(), kwargs.items()))
    url = 'https://api.wolframalpha.com/v2/query?' + urllib.parse.urlencode(merged)
    resp = urllib.request.urlopen(url)
    assert resp.headers.get_content_type() == 'text/xml'
    assert resp.headers.get_param('charset') == 'utf-8'
    return Result(resp)
The pods assumptions and warnings of this result.
def info(self):
    """
    Iterate over the pods, assumptions, and warnings of this result.
    """
    sections = (self.pods, self.assumptions, self.warnings)
    return itertools.chain.from_iterable(sections)
The pods that hold the response to a simple discrete query.
def results(self):
    """
    Yield the pods that answer a simple, discrete query: primary pods
    plus any pod titled 'Result'.
    """
    for pod in self.pods:
        if pod.primary or pod.title == 'Result':
            yield pod
Add request content data to request body set Content - type header.
def encode(request, data):
    """
    Attach JSON-encoded data to the request body and set the Content-Type
    header. Should be overridden by subclasses if not using JSON encoding.

    Args:
        request (HTTPRequest): The request object.
        data (dict, None): Data to be encoded; None leaves the request as-is.

    Returns:
        HTTPRequest: The request object.
    """
    if data is None:
        return request
    request.data = json.dumps(data)
    request.add_header('Content-Type', 'application/json')
    return request
Call API.
def call_api(
    self,
    method,
    url,
    headers=None,
    params=None,
    data=None,
    files=None,
    timeout=None,
):
    """
    Call the API and return the response plus its HTTP status code.

    Args:
        method (str): The HTTP method to use.
        url (str): Resource location relative to the base URL.
        headers (dict or None): Extra request headers to set.
        params (dict or None): Query-string parameters.
        data (dict or None): Request body contents for POST/PUT requests.
        files (dict or None): Files to be passed to the request.
        timeout (int): Maximum time before timing out.

    Returns:
        tuple: (requests.Response, int status code).
    """
    # Deep-copy the caller's headers/params so they are never mutated.
    headers = deepcopy(headers) or {}
    headers['Accept'] = self.accept_type
    params = deepcopy(params) or {}
    if self.username and self.api_key:
        params.update(self.get_credentials())
    r = requests.request(
        method.upper(),
        urljoin(self.base_url, url),
        headers=headers,
        params=params,
        files=files or {},
        data=data or {},
        timeout=timeout,
    )
    return r, r.status_code
Call the API with a GET request.
def get(self, url, params=None, **kwargs):
    """
    Call the API with a GET request.

    Args:
        url (str): Resource location relative to the base URL.
        params (dict or None): Query-string parameters.

    Returns:
        ResultParser or ErrorParser.
    """
    return self.call_api("GET", url, params=params, **kwargs)
Call the API with a DELETE request.
def delete(self, url, params=None, **kwargs):
    """Issue a DELETE request against the API.

    Args:
        url (str): Resource location relative to the base URL.
        params (dict or None): Query-string parameters.

    Returns:
        ResultParser or ErrorParser.
    """
    request_kwargs = dict(kwargs, params=params)
    return self.call_api("DELETE", url, **request_kwargs)
Call the API with a PUT request.
def put(self, url, params=None, data=None, files=None, **kwargs):
    """Issue a PUT request against the API.

    Args:
        url (str): Resource location relative to the base URL.
        params (dict or None): Query-string parameters.
        data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.

    Returns:
        An instance of ResultParser or ErrorParser.
    """
    payload = {'params': params, 'data': data, 'files': files}
    return self.call_api("PUT", url, **payload, **kwargs)
Call the API with a POST request.
def post(self, url, params=None, data=None, files=None, **kwargs):
    """Issue a POST request against the API.

    Args:
        url (str): Resource location relative to the base URL.
        params (dict or None): Query-string parameters.
        data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.

    Returns:
        An instance of ResultParser or ErrorParser.
    """
    payload = {'params': params, 'data': data, 'files': files}
    return self.call_api("POST", url, **payload, **kwargs)
Process the query recursively; if the text is too long, it is split and processed bit by bit.
def _process_query(self, query, prepared=False):
    """Process the query recursively; over-long text is split and processed bit by bit.

    Args:
        query (dict): Query payload; must contain a ``'text'`` key on the
            initial (non-prepared) call.
        prepared (bool): True when the query is ready to be submitted via
            POST request.

    Returns:
        tuple: (dict, int) — the enriched query and the API status code,
        or (None, status) on failure.
    """
    # Exit condition of the recursion: submit the prepared query via POST.
    if prepared is True:
        files = {'query': str(query)}
        logger.debug('About to submit the following query {}'.format(query))
        res, status = self.post(
            self.disambiguate_service,
            files=files,
            headers={'Accept': 'application/json'},
        )
        if status == 200:
            return self.decode(res), status
        else:
            logger.debug('Disambiguation failed.')
            return None, status

    text = query['text']
    # Default: treat the whole text as a single sentence span.
    sentence_coordinates = [
        {
            "offsetStart": 0,
            "offsetEnd": len(text)
        }
    ]
    total_nb_sentences = len(sentence_coordinates)
    # Groups of sentence indices to submit in separate requests.
    sentences_groups = []
    if len(text) > self.max_text_length:
        # Text exceeds the service limit: segment it into real sentences.
        res, status_code = self.segment(text)
        if status_code == 200:
            sentence_coordinates = res['sentences']
            total_nb_sentences = len(sentence_coordinates)
        else:
            # NOTE(review): on segmentation failure we log but fall through
            # with the single full-text span — presumably best-effort.
            logger.error('Error during the segmentation of the text.')
        logger.debug(
            'Text too long, split in {} sentences; building groups of {} '
            'sentences.'.format(
                total_nb_sentences, self.sentences_per_group
            )
        )
        sentences_groups = self._group_sentences(
            total_nb_sentences, self.sentences_per_group
        )
    else:
        # Short text: ask the service for per-sentence processing directly.
        query['sentence'] = "true"
        if total_nb_sentences > 1:
            query['sentences'] = sentence_coordinates

    if len(sentences_groups) > 0:
        # Submit one prepared request per sentence group, accumulating
        # entities/language into the same query dict across iterations.
        for group in sentences_groups:
            query['processSentence'] = group
            res, status_code = self._process_query(query, prepared=True)
            if status_code == 200:
                if 'entities' in res:
                    query['entities'] = res[u'entities']
                query['language'] = res[u'language']
            else:
                logger.error(
                    "Error when processing the query {}".format(query)
                )
                return None, status_code
    else:
        # Single-shot submission for short texts.
        res, status_code = self._process_query(query, prepared=True)
        if status_code == 200:
            query['language'] = res[u'language']
            if 'entities' in res:
                query['entities'] = res[u'entities']
        else:
            logger.error("Error when processing the query {}".format(query))
            return None, status_code
    return query, status_code
Split sentences in groups given a specific group length.
def _group_sentences(total_nb_sentences, group_length): """ Split sentences in groups, given a specific group length. Args: total_nb_sentences (int): Total available sentences. group_length (int): Limit of length for each group. Returns: list: Contains groups (lists) of sentences. """ sentences_groups = [] current_sentence_group = [] for i in range(0, total_nb_sentences): if i % group_length == 0: if len(current_sentence_group) > 0: sentences_groups.append(current_sentence_group) current_sentence_group = [i] else: current_sentence_group.append(i) if len(current_sentence_group) > 0: sentences_groups.append(current_sentence_group) return sentences_groups
Call the disambiguation service in order to process a pdf file.
def disambiguate_pdf(self, file, language=None, entities=None):
    """Call the disambiguation service in order to process a PDF file.

    Args:
        file (str): Path of the PDF file to be disambiguated.
        language (str): Language of the text (if known).
        entities (list): Entities or mentions supplied by the caller.

    Returns:
        dict, int: API response and API status.
    """
    body = {
        "customisation": "generic"
    }
    if language:
        body['language'] = {"lang": language}
    if entities:
        body['entities'] = entities
    # Fix: the file handle was previously opened inline and never closed,
    # leaking a descriptor per call. Close it deterministically once the
    # request has been sent.
    with open(file, 'rb') as pdf_handle:
        files = {
            'query': str(body),
            'file': (
                file,
                pdf_handle,
                'application/pdf',
                {'Expires': '0'}
            )
        }
        res, status = self.post(
            self.disambiguate_service,
            files=files,
            headers={'Accept': 'application/json'},
        )
    if status != 200:
        logger.debug('Disambiguation failed with error ' + str(status))
    return self.decode(res), status
Call the disambiguation service in order to get meanings.
def disambiguate_terms(self, terms, language="en", entities=None):
    """Call the disambiguation service in order to get meanings.

    Args:
        terms (obj): List of objects of term, weight.
        language (str): Language of the text; English if not specified.
        entities (list): Entities or mentions supplied by the caller.

    Returns:
        dict, int: API response and API status, or (None, status) on failure.
    """
    body = {
        "termVector": terms,
        "entities": [],
        "onlyNER": "false",
        "customisation": "generic",
        "language": {"lang": language},
    }
    if entities:
        body['entities'] = entities

    payload = {'query': str(body)}
    logger.debug('About to submit the following query {}'.format(body))
    res, status = self.post(
        self.disambiguate_service,
        files=payload,
        headers={'Accept': 'application/json'},
    )
    if status != 200:
        logger.debug('Disambiguation failed.')
        return None, status
    return self.decode(res), status
Call the disambiguation service in order to get meanings.
def disambiguate_text(self, text, language=None, entities=None):
    """Call the disambiguation service in order to get meanings.

    Args:
        text (str): Text to be disambiguated.
        language (str): Language of the text (if known).
        entities (list): Entities or mentions supplied by the caller.

    Returns:
        dict, int: API response and API status.
    """
    body = dict(
        text=text,
        entities=[],
        onlyNER="false",
        customisation="generic",
    )
    if language:
        body['language'] = {"lang": language}
    if entities:
        body['entities'] = entities

    result, status_code = self._process_query(body)
    if status_code != 200:
        logger.debug('Disambiguation failed.')
    return result, status_code
Call the disambiguation service in order to disambiguate a search query.
def disambiguate_query(self, query, language=None, entities=None):
    """Call the disambiguation service to disambiguate a search query.

    Args:
        query (str): Query to be disambiguated.
        language (str): Language of the text (if known).
        entities (list): Entities or mentions supplied by the caller.

    Returns:
        dict, int: API response and API status, or (None, status) on failure.
    """
    body = {
        "shortText": query,
        "entities": [],
        "onlyNER": "false",
        "customisation": "generic",
    }
    if language:
        body['language'] = {"lang": language}
    if entities:
        body['entities'] = entities

    payload = {'query': str(body)}
    logger.debug('About to submit the following query {}'.format(body))
    res, status = self.post(
        self.disambiguate_service,
        files=payload,
        headers={'Accept': 'application/json'},
    )
    if status != 200:
        logger.debug('Disambiguation failed.')
        return None, status
    return self.decode(res), status
Call the segmenter in order to split text in sentences.
def segment(self, text):
    """Call the segmenter in order to split text into sentences.

    Args:
        text (str): Text to be segmented.

    Returns:
        dict, int: A dict with the offsets of each sentence, and the
        response status code.
    """
    res, status_code = self.post(self.segmentation_service,
                                 files={'text': text})
    if status_code != 200:
        logger.debug('Segmentation failed.')
    return self.decode(res), status_code
Recognise the language of the input text.
def get_language(self, text):
    """Recognise the language of the input text.

    Args:
        text (str): The text whose language needs to be recognised.

    Returns:
        dict, int: A dict containing the recognised language and the
        confidence score, and the response status code.
    """
    res, status_code = self.post(self.language_service, files={'text': text})
    if status_code != 200:
        logger.debug('Language recognition failed.')
    return self.decode(res), status_code
Fetch the concept from the Knowledge base
def get_concept(self, conceptId, lang='en'):
    """Fetch a concept from the knowledge base.

    Args:
        conceptId (str): The concept id to be fetched; it can be a
            Wikipedia page id or a Wikidata id.
        lang (str): Language of the concept description.

    Returns:
        dict, int: The concept information and the response status code.
    """
    # The trailing slash makes urljoin append rather than replace the path.
    concept_url = urljoin(self.concept_service + '/', conceptId)
    res, status_code = self.get(concept_url, params={'lang': lang})
    if status_code != 200:
        logger.debug('Fetch concept failed.')
    return self.decode(res), status_code
Constructs the MDR ensemble from the provided training data
def fit(self, features, classes):
    """Construct the MDR ensemble from the provided training data.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    classes: array-like {n_samples}
        List of class labels for prediction

    Returns
    -------
    None
    """
    self.ensemble.fit(features, classes)
    # Cache the ensemble's prediction for every distinct feature pattern.
    for pattern in {tuple(row) for row in features}:
        self.feature_map[pattern] = self.ensemble.predict([pattern])[0]
Estimates the accuracy of the predictions from the MDR ensemble
def score(self, features, classes, scoring_function=None, **scoring_function_kwargs):
    """Estimate the accuracy of the predictions from the MDR ensemble.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix to predict from
    classes: array-like {n_samples}
        List of true class labels

    Returns
    -------
    accuracy_score: float
        The estimated accuracy based on the constructed feature
    """
    predictions = self.ensemble.predict(features)
    if scoring_function is not None:
        return scoring_function(classes, predictions, **scoring_function_kwargs)
    return accuracy_score(classes, predictions)
Constructs the MDR feature map from the provided training data.
def fit(self, features, class_labels):
    """Construct the MDR feature map from the provided training data.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    class_labels: array-like {n_samples}
        List of true class labels

    Returns
    -------
    self: A copy of the fitted model
    """
    unique_labels = sorted(np.unique(class_labels))
    if len(unique_labels) != 2:
        raise ValueError('MDR only supports binary endpoints.')

    # Tally how many samples of each class fall into every MDR grid cell.
    cell_counts = defaultdict(lambda: defaultdict(int))
    for feature_row, label in zip(features, class_labels):
        cell_counts[tuple(feature_row)][label] += 1
    self.class_count_matrix = dict(cell_counts)

    label_a, label_b = unique_labels
    # Binary classification only: fraction of the first class overall.
    overall_class_fraction = float(sum(class_labels == label_a)) / class_labels.size

    # A cell is assigned to the class that is over-represented in it
    # relative to the overall class balance; exact ties use tie_break.
    self.feature_map = {}
    for cell, counts in self.class_count_matrix.items():
        cell_fraction = float(counts[label_a]) / np.sum(list(counts.values()))
        if cell_fraction > overall_class_fraction:
            self.feature_map[cell] = label_a
        elif cell_fraction == overall_class_fraction:
            self.feature_map[cell] = self.tie_break
        else:
            self.feature_map[cell] = label_b
    return self
Convenience function that fits the provided data then constructs a new feature from the provided features.
def fit_transform(self, features, class_labels):
    """Fit the model, then construct a new feature from the provided features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    class_labels: array-like {n_samples}
        List of true class labels

    Returns
    ----------
    array-like: {n_samples, 1}
        Constructed features from the provided feature matrix
    """
    self.fit(features, class_labels)
    return self.transform(features)
Convenience function that fits the provided data then constructs predictions from the provided features.
def fit_predict(self, features, class_labels):
    """Fit the model, then construct predictions from the provided features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    class_labels: array-like {n_samples}
        List of true class labels

    Returns
    ----------
    array-like: {n_samples}
        Constructed features from the provided feature matrix
    """
    self.fit(features, class_labels)
    return self.predict(features)
Estimates the accuracy of the predictions from the constructed feature.
def score(self, features, class_labels, scoring_function=None, **scoring_function_kwargs):
    """Estimate the accuracy of the predictions from the constructed feature.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix to predict from
    class_labels: array-like {n_samples}
        List of true class labels

    Returns
    -------
    accuracy_score: float
        The estimated accuracy based on the constructed feature

    Raises
    ------
    ValueError
        If the model has not been fit yet.
    """
    if self.feature_map is None:
        raise ValueError('The MDR model must be fit before score can be called.')
    predictions = self.predict(features)
    if scoring_function is not None:
        return scoring_function(class_labels, predictions, **scoring_function_kwargs)
    return accuracy_score(class_labels, predictions)
Constructs the Continuous MDR feature map from the provided training data.
def fit(self, features, targets):
    """Construct the Continuous MDR feature map from the provided training data.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    targets: array-like {n_samples}
        List of target values for prediction

    Returns
    -------
    self: A copy of the fitted model
    """
    self.overall_mean_trait_value = np.mean(targets)

    # Collect the target values observed in each MDR grid cell.
    cell_values = defaultdict(list)
    for feature_row, target in zip(features, targets):
        cell_values[tuple(feature_row)].append(target)

    # Cells whose mean trait exceeds the overall mean map to 1, lower
    # cells map to 0, and exact ties fall back to tie_break.
    feature_map = {}
    for cell, values in cell_values.items():
        cell_mean = np.mean(values)
        if cell_mean > self.overall_mean_trait_value:
            feature_map[cell] = 1
        elif cell_mean == self.overall_mean_trait_value:
            feature_map[cell] = self.tie_break
        else:
            feature_map[cell] = 0

    # Plain dicts keep ContinuousMDR objects easily picklable.
    self.feature_map = feature_map
    self.mdr_matrix_values = dict(cell_values)
    return self
Uses the Continuous MDR feature map to construct a new feature from the provided features.
def transform(self, features):
    """Use the Continuous MDR feature map to construct a new feature from the provided features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix to transform

    Returns
    ----------
    array-like: {n_samples, 1}
        Constructed feature from the provided feature matrix.
        The constructed feature is binary, taking the values 0 and 1.
    """
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the equivalent dtype.
    new_feature = np.zeros(features.shape[0], dtype=int)
    for row_i in range(features.shape[0]):
        feature_instance = tuple(features[row_i])
        if feature_instance in self.feature_map:
            new_feature[row_i] = self.feature_map[feature_instance]
        else:
            # Feature combinations unseen at fit time get the default label.
            new_feature[row_i] = self.default_label
    return new_feature.reshape(features.shape[0], 1)
Convenience function that fits the provided data then constructs a new feature from the provided features.
def fit_transform(self, features, targets):
    """Fit the model, then construct a new feature from the provided features.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    targets: array-like {n_samples}
        List of true target values

    Returns
    ----------
    array-like: {n_samples}
        Constructed features from the provided feature matrix
    """
    self.fit(features, targets)
    return self.transform(features)
Estimates the quality of the ContinuousMDR model using a t - statistic.
def score(self, features, targets):
    """Estimate the quality of the ContinuousMDR model using a t-statistic.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix to predict from
    targets: array-like {n_samples}
        List of true target values

    Returns
    -------
    quality_score: float
        The estimated quality of the Continuous MDR model

    Raises
    ------
    ValueError
        If the model has not been fit yet.
    """
    if self.feature_map is None:
        raise ValueError('The Continuous MDR model must be fit before score() can be called.')

    # Pool the training trait values of the two output groups.
    grouped_trait_values = {0: [], 1: []}
    for cell, cell_label in self.feature_map.items():
        group = 0 if cell_label == 0 else 1
        grouped_trait_values[group].extend(self.mdr_matrix_values[cell])

    return abs(ttest_ind(grouped_trait_values[0], grouped_trait_values[1]).statistic)
Calculates the entropy H ( X ) in the given base
def entropy(X, base=2):
    """Calculate the entropy, H(X), in the given base.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the entropy
    base: integer (default: 2)
        The base in which to calculate entropy

    Returns
    ----------
    entropy: float
        The entropy calculated according to the equation
        H(X) = -sum(p_x * log p_x) for all states of X
    """
    state_counts = Counter(X).values()
    # scipy normalizes the counts into probabilities internally.
    return scipy.stats.entropy(list(state_counts), base=base)
Calculates the joint entropy H ( X Y ) in the given base
def joint_entropy(X, Y, base=2):
    """Calculate the joint entropy, H(X,Y), in the given base.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the joint entropy
    Y: array-like (# samples)
        An array of values for which to compute the joint entropy
    base: integer (default: 2)
        The base in which to calculate joint entropy

    Returns
    ----------
    joint_entropy: float
        The joint entropy calculated according to the equation
        H(X,Y) = -sum(p_xy * log p_xy) for all combined states of X and Y
    """
    # Fuse each (x, y) pair into a single combined state string.
    combined_states = ['{}{}'.format(x, y) for x, y in zip(X, Y)]
    return entropy(combined_states, base=base)
Calculates the conditional entropy H ( X|Y ) in the given base
def conditional_entropy(X, Y, base=2):
    """Calculate the conditional entropy, H(X|Y), in the given base.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the conditional entropy
    Y: array-like (# samples)
        An array of values for which to compute the conditional entropy
    base: integer (default: 2)
        The base in which to calculate conditional entropy

    Returns
    ----------
    conditional_entropy: float
        The conditional entropy calculated according to the equation
        H(X|Y) = H(X,Y) - H(Y)
    """
    h_xy = joint_entropy(X, Y, base=base)
    h_y = entropy(Y, base=base)
    return h_xy - h_y
Calculates the mutual information between two variables I ( X ; Y ) in the given base
def mutual_information(X, Y, base=2):
    """Calculate the mutual information between two variables, I(X;Y), in the given base.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the mutual information
    Y: array-like (# samples)
        An array of values for which to compute the mutual information
    base: integer (default: 2)
        The base in which to calculate mutual information

    Returns
    ----------
    mutual_information: float
        The mutual information calculated according to the equation
        I(X;Y) = H(Y) - H(Y|X)
    """
    h_y = entropy(Y, base=base)
    h_y_given_x = conditional_entropy(Y, X, base=base)
    return h_y - h_y_given_x
Calculates the two - way information gain between three variables I ( X ; Y ; Z ) in the given base
def two_way_information_gain(X, Y, Z, base=2):
    """Calculate the two-way information gain among three variables, I(X;Y;Z), in the given base.

    IG(X;Y;Z) indicates the information gained about variable Z by the joint
    variable X_Y, after removing the information that X and Y have about Z
    individually. Thus, two-way information gain measures the synergistic
    predictive value of variables X and Y about variable Z.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the 2-way information gain
    Y: array-like (# samples)
        An array of values for which to compute the 2-way information gain
    Z: array-like (# samples)
        An array of outcome values for which to compute the 2-way information gain
    base: integer (default: 2)
        The base in which to calculate 2-way information

    Returns
    ----------
    mutual_information: float
        The information gain calculated according to the equation
        IG(X;Y;Z) = I(X,Y;Z) - I(X;Z) - I(Y;Z)
    """
    joint_states = ['{}{}'.format(x, y) for x, y in zip(X, Y)]
    gain = mutual_information(joint_states, Z, base=base)
    gain -= mutual_information(X, Z, base=base)
    gain -= mutual_information(Y, Z, base=base)
    return gain
Calculates the three-way information gain among four variables, I(W;X;Y;Z), in the given base
def three_way_information_gain(W, X, Y, Z, base=2):
    """Calculate the three-way information gain among four variables, I(W;X;Y;Z), in the given base.

    IG(W;X;Y;Z) indicates the information gained about variable Z by the
    joint variable W_X_Y, after removing the information that W, X, and Y
    have about Z individually and jointly in pairs. Thus, 3-way information
    gain measures the synergistic predictive value of variables W, X, and Y
    about variable Z.

    Parameters
    ----------
    W: array-like (# samples)
        An array of values for which to compute the 3-way information gain
    X: array-like (# samples)
        An array of values for which to compute the 3-way information gain
    Y: array-like (# samples)
        An array of values for which to compute the 3-way information gain
    Z: array-like (# samples)
        An array of outcome values for which to compute the 3-way information gain
    base: integer (default: 2)
        The base in which to calculate 3-way information

    Returns
    ----------
    mutual_information: float
        The information gain calculated according to the equation:
        IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z)
                      - I(W;Z) - I(X;Z) - I(Y;Z)
    """
    joint_states = ['{}{}{}'.format(w, x, y) for w, x, y in zip(W, X, Y)]
    gain = mutual_information(joint_states, Z, base=base)
    gain -= two_way_information_gain(W, X, Z, base=base)
    gain -= two_way_information_gain(W, Y, Z, base=base)
    gain -= two_way_information_gain(X, Y, Z, base=base)
    gain -= mutual_information(W, Z, base=base)
    gain -= mutual_information(X, Z, base=base)
    gain -= mutual_information(Y, Z, base=base)
    return gain
Fits a MDR model to variables X and Y with the given labels then returns the resulting predictions
def _mdr_predict(X, Y, labels):
    """Fit an MDR model to variables X and Y with the given labels and return its predictions.

    This is a convenience method that should only be used internally.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y

    Returns
    ----------
    predictions: array-like (# samples)
        The predictions from the fitted MDR model
    """
    paired_features = np.column_stack((X, Y))
    return MDR().fit_predict(paired_features, labels)
Calculates the MDR entropy H ( XY ) in the given base
def mdr_entropy(X, Y, labels, base=2):
    """Calculate the MDR entropy, H(XY), in the given base.

    MDR entropy is calculated by combining variables X and Y into a single
    MDR model, then calculating the entropy of the resulting model's
    predictions.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR entropy

    Returns
    ----------
    mdr_entropy: float
        The MDR entropy calculated according to the equation
        H(XY) = -sum(p_xy * log p_xy) for all output states of the MDR model
    """
    constructed_feature = _mdr_predict(X, Y, labels)
    return entropy(constructed_feature, base=base)
Calculates the MDR conditional entropy H ( XY|labels ) in the given base
def mdr_conditional_entropy(X, Y, labels, base=2):
    """Calculate the MDR conditional entropy, H(XY|labels), in the given base.

    MDR conditional entropy is calculated by combining variables X and Y
    into a single MDR model, then calculating the entropy of the resulting
    model's predictions conditional on the provided labels.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR conditional entropy

    Returns
    ----------
    mdr_conditional_entropy: float
        The MDR conditional entropy calculated according to the equation
        H(XY|labels) = H(XY,labels) - H(labels)
    """
    constructed_feature = _mdr_predict(X, Y, labels)
    return conditional_entropy(constructed_feature, labels, base=base)
Calculates the MDR mutual information I ( XY ; labels ) in the given base
def mdr_mutual_information(X, Y, labels, base=2):
    """Calculate the MDR mutual information, I(XY;labels), in the given base.

    MDR mutual information is calculated by combining variables X and Y into
    a single MDR model, then calculating the mutual information between the
    resulting model's predictions and the labels.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR mutual information

    Returns
    ----------
    mdr_mutual_information: float
        The MDR mutual information calculated according to the equation
        I(XY;labels) = H(labels) - H(labels|XY)
    """
    constructed_feature = _mdr_predict(X, Y, labels)
    return mutual_information(constructed_feature, labels, base=base)
Fits a MDR model to all n - way combinations of the features in X.
def n_way_models(mdr_instance, X, y, n=(2,), feature_names=None):
    """Fit a MDR model to all n-way combinations of the features in X.

    Note that this function performs an exhaustive search through all
    feature combinations and can be computationally expensive.

    Parameters
    ----------
    mdr_instance: object
        An instance of the MDR type to use.
    X: array-like (# rows, # features)
        NumPy matrix containing the features
    y: array-like (# rows, 1)
        NumPy matrix containing the target values
    n: iterable of int (default: (2,))
        The size(s) of the MDR models to generate. e.g., if n == [3],
        all 3-way models will be generated. (The default was changed from
        the mutable ``[2]`` to the equivalent immutable ``(2,)`` — mutable
        default arguments are shared across calls.)
    feature_names: list (default: None)
        The corresponding names of the features in X.
        If None, then the features will be named according to their order.

    Yields
    ----------
    (fitted_model, fitted_model_score, fitted_model_features): tuple of (object, float, list)
        fitted_model is the MDR model fitted to the data,
        fitted_model_score is its training score, and
        fitted_model_features lists the names of the features it used.
    """
    if feature_names is None:
        feature_names = list(range(X.shape[1]))

    for model_size in n:
        for feature_idxs in itertools.combinations(range(X.shape[1]), model_size):
            # Each combination gets its own pristine copy of the estimator.
            mdr_model = copy.deepcopy(mdr_instance)
            mdr_model.fit(X[:, feature_idxs], y)
            mdr_model_score = mdr_model.score(X[:, feature_idxs], y)
            model_features = [feature_names[idx] for idx in feature_idxs]
            yield mdr_model, mdr_model_score, model_features
Visualizes the MDR grid of a given fitted MDR instance. Only works for 2 - way MDR models. This function is currently incomplete.
def plot_mdr_grid(mdr_instance):
    """Visualize the MDR grid of a given fitted MDR instance.

    Only works for 2-way MDR models. This function is currently incomplete.

    Parameters
    ----------
    mdr_instance: object
        A fitted instance of the MDR type to visualize.

    Returns
    ----------
    fig: matplotlib.figure
        Figure object for the visualized MDR grid.
    """
    var1_levels = list(set([variables[0] for variables in mdr_instance.feature_map]))
    var2_levels = list(set([variables[1] for variables in mdr_instance.feature_map]))
    max_count = np.array(list(mdr_instance.class_count_matrix.values())).flatten().max()

    # TODO:
    # - Add common axis labels
    # - Make sure this scales for smaller and larger record sizes
    # - Extend to 3-way+ models
    fig, splots = plt.subplots(ncols=len(var1_levels), nrows=len(var2_levels),
                               sharey=True, sharex=True)
    fig.set_figwidth(6)
    fig.set_figheight(6)

    for (var1, var2) in itertools.product(var1_levels, var2_levels):
        class_counts = mdr_instance.class_count_matrix[(var1, var2)]
        splot = splots[var2_levels.index(var2)][var1_levels.index(var1)]
        splot.set_yticks([])
        splot.set_xticks([])
        splot.set_ylim(0, max_count * 1.5)
        splot.set_xlim(-0.5, 1.5)
        if var2_levels.index(var2) == 0:
            splot.set_title('X1 = {}'.format(var1), fontsize=12)
        if var1_levels.index(var1) == 0:
            splot.set_ylabel('X2 = {}'.format(var2), fontsize=12)
        # Fix: the `left=` keyword was removed from Axes.bar in matplotlib
        # 2.0; the x positions are now the first positional argument.
        bars = splot.bar(range(class_counts.shape[0]), height=class_counts,
                         width=0.5, color='black', align='center')
        bgcolor = 'lightgrey' if mdr_instance.feature_map[(var1, var2)] == 0 else 'darkgrey'
        # Fix: set_axis_bgcolor was removed in matplotlib 2.2; set_facecolor
        # is its replacement.
        splot.set_facecolor(bgcolor)
        for index, bar in enumerate(bars):
            splot.text(index, class_counts[index] + (max_count * 0.1),
                       class_counts[index], ha='center')

    fig.tight_layout()
    return fig
等价于 django makemigrations 操作
def makemigrations(migrations_root):
    """Equivalent of Django's ``makemigrations`` operation.

    Initialises the migrations directory if needed, reads the module names
    listed in ``models.txt``, imports each module's ``models`` submodule,
    and generates the migration scripts.

    Args:
        migrations_root (str or None): Migrations directory; falls back to
            ``$FANTASY_MIGRATION_PATH/migrations`` (or CWD).
    """
    from flask_migrate import (Migrate,
                               init as migrate_init,
                               migrate as migrate_exec)
    migrations_root = migrations_root or os.path.join(
        os.environ.get('FANTASY_MIGRATION_PATH', os.getcwd()), 'migrations')
    migrations_root = os.path.expanduser(migrations_root)
    mig = Migrate(app, app.db, directory=migrations_root)
    # First run: create the migrations directory scaffold.
    if not os.path.exists(migrations_root):
        migrate_init(migrations_root)
        pass
    # models.txt lists the modules whose models take part in migrations.
    models_file = os.path.join(migrations_root, 'models.txt')
    if not os.path.exists(models_file):
        with open(models_file, 'w') as fw:
            fw.write('# add module name in this file.')
            pass
        pass
    with open(models_file, 'r') as fp:
        modules = fp.readlines()
        pass
    # Drop blank lines, strip inline '#' comments, keep non-empty names.
    modules = filter(lambda x: x.strip("\n"), modules)
    modules = map(lambda x: x.strip("\n").split("#")[0].strip(), modules)
    modules = list(filter(lambda x: x, modules))
    if not modules:
        click.echo(
            click.style('No models found,'
                        'skip create migrations...'
                        'You need edit models.txt file set your module',
                        fg='yellow'))
        sys.exit(0)
    # Importing <module>.models registers the models with SQLAlchemy so the
    # autogenerated migration can see them.
    for m in modules:
        importlib.import_module(m + '.models')
        pass
    migrate_exec(migrations_root)
    mig.init_app(app, app.db)
    pass
等价于 django migrate 操作
def migrate(migrations_root):
    """Equivalent of Django's ``migrate`` operation.

    Creates the database if it does not exist yet, then applies all pending
    migration scripts found under the migrations directory.

    Args:
        migrations_root (str or None): Migrations directory; falls back to
            ``$FANTASY_MIGRATION_PATH/migrations`` (or CWD).
    """
    from flask_migrate import Migrate, upgrade as migrate_upgrade
    from flask_sqlalchemy import SQLAlchemy
    from sqlalchemy.engine.url import make_url
    from sqlalchemy_utils import database_exists, create_database
    db = SQLAlchemy()
    dsn = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
    # Bootstrap: create the target database on first run.
    if not database_exists(dsn):
        create_database(dsn)
        pass
    migrations_root = migrations_root or os.path.join(
        os.environ.get('FANTASY_MIGRATION_PATH', os.getcwd()), 'migrations')
    migrations_root = os.path.expanduser(migrations_root)
    if os.path.exists(migrations_root):
        mig = Migrate(app, db, directory=migrations_root)
        mig.init_app(app, db)
        migrate_upgrade(migrations_root)
    else:
        # Without migration scripts there is nothing to apply; abort.
        click.echo(
            click.style('migration files not exist,skip migrate...',
                        fg='red'))
        sys.exit(-1)
    pass
def requirements(work_dir, hive_root, with_requirements, with_dockerfile,
                 active_module, active_module_file):
    """Rebuild the project's dependency artifacts.

    Collects the active modules (from *active_module_file* and
    *active_module*, in that order), then regenerates, depending on flags
    and what already exists in *work_dir*:

    - ``requirements/hive.txt`` and ``requirements/hive-modules.txt``
    - the ``ARG HIVE_PACKAGES`` line of an existing ``Dockerfile``
    - ``migrations/models.txt`` and ``migrations/tasks.txt``

    :param work_dir: target project directory (defaults to
        ``FANTASY_APP_PATH`` or CWD).
    :param hive_root: root of the hive checkout; prepended to ``sys.path``
        so hive-local modules can be imported.
    :param with_requirements: regenerate the requirements files.
    :param with_dockerfile: update the Dockerfile's package list.
    :param active_module: iterable of module names to activate.
    :param active_module_file: optional file listing module names
        ('#' starts a comment).
    """
    sys.path.insert(0, hive_root)
    hive_root = os.path.abspath(os.path.expanduser(hive_root))

    work_dir = work_dir or os.path.join(
        os.environ.get('FANTASY_APP_PATH', os.getcwd()))
    work_dir = os.path.expanduser(work_dir)
    requirements_root = os.path.join(work_dir, 'requirements')
    migrate_root = os.path.join(work_dir, 'migrations')

    # Active modules are strictly ordered: file entries first, then the
    # explicitly passed ones.
    active_module_paths = []
    active_module_list = []
    if active_module_file:
        with open(active_module_file, 'r') as fp:
            for line in fp:
                pkg = line.split('#')[0].strip()
                if pkg:
                    active_module_list.append(line.strip("\n"))
    active_module_list += active_module

    for m in active_module_list:
        try:
            mod = importlib.import_module(m)
            active_module_paths.append(os.path.dirname(mod.__file__))
        except ImportError:
            # BUG FIX: click.echo's `color` kwarg is a boolean that
            # forces/strips ANSI output, not a color name — the original
            # `color="yellow"` never colored the message. Use the
            # click.style pattern used throughout this file instead.
            click.echo(click.style('module "%s" not found.' % m,
                                   fg="yellow"))

    def build_requirements():
        """Build the requirements files.

        Two files are produced:

        - ``hive.txt``: copied verbatim from the hive project.
        - ``hive-modules.txt``: dependencies collected from every active
          module's own ``requirements.txt``.

        .. note:: entries must be order-independent, because a set is used
           for de-duplication.
        """
        if not os.path.exists(requirements_root):
            os.makedirs(requirements_root)

        click.echo(click.style("Generate hive requirements...", fg="yellow"))
        shutil.copy(
            os.path.join(hive_root, 'requirements.txt'),
            os.path.join(requirements_root, 'hive.txt')
        )

        click.echo(click.style("Generate hive-module requirements...",
                               fg="yellow"))
        requirements_files = []
        for module_path in active_module_paths:
            candidate = os.path.join(module_path, 'requirements.txt')
            if os.path.exists(candidate):
                requirements_files.append(candidate)

        # Collect unique, comment-stripped package specs.
        module_packages = set()
        with fileinput.input(requirements_files) as fp:
            for line in fp:
                pkg = line.split('#')[0].strip()
                if pkg:
                    module_packages.add(pkg)

        with click.open_file(
                os.path.join(requirements_root, 'hive-modules.txt'),
                'w') as fp:
            for p in module_packages:
                fp.write("%s\n" % p)

    def build_dockerfile():
        """Update the ``ARG HIVE_PACKAGES`` line of an existing Dockerfile."""
        modules_in_hive = map(
            lambda x: x.replace(hive_root, '').lstrip('/'),
            filter(lambda x: x.startswith(hive_root), active_module_paths))
        modules_path = ' '.join(modules_in_hive)

        docker_file = os.path.join(
            os.path.dirname(requirements_root), 'Dockerfile')

        # Only update an existing Dockerfile; never create one.
        if os.path.exists(docker_file):
            click.echo(click.style("Found Dockerfile,try update...",
                                   fg="yellow"))
            with open(docker_file, 'r') as fp:
                content = fp.read()

            import re
            replaced = re.sub('ARG HIVE_PACKAGES=".*"',
                              'ARG HIVE_PACKAGES="%s"' % modules_path,
                              content)
            with open(docker_file, 'w') as fp:
                fp.write(replaced)

    def build_migrations():
        """Rewrite migrations/models.txt with modules that ship models.py."""
        models_pairs = filter(
            lambda pair: os.path.exists(pair[0]),
            map(lambda x: (os.path.join(x[0], 'models.py'), x[1]),
                [(v, active_module_list[i])
                 for i, v in enumerate(active_module_paths)]))
        try:
            _, models = zip(*models_pairs)
        except ValueError:
            # zip(*empty) raises ValueError on unpack: no module has models.
            click.echo(click.style("No models found,"
                                   "is it include in "
                                   "your PYTHONPATH?\n"
                                   "Modules: %s" % ','.join(
                                       active_module_list),
                                   fg="yellow"))
            return
        click.echo(click.style("Found models.txt,try update...",
                               fg="yellow"))
        with open(os.path.join(migrate_root, 'models.txt'), 'w') as fp:
            for p in models:
                fp.write("%s\n" % p)

    def build_tasks():
        """Rewrite migrations/tasks.txt with modules that ship tasks.py."""
        tasks_pairs = filter(
            lambda pair: os.path.exists(pair[0]),
            map(lambda x: (os.path.join(x[0], 'tasks.py'), x[1]),
                [(v, active_module_list[i])
                 for i, v in enumerate(active_module_paths)]))
        try:
            _, tasks = zip(*tasks_pairs)
        except ValueError:
            click.echo(click.style("No tasks found,"
                                   "is it include in "
                                   "your PYTHONPATH?\n"
                                   "Modules: %s" % ','.join(
                                       active_module_list),
                                   fg="yellow"))
            return
        click.echo(click.style("Found tasks.txt,try update...", fg="yellow"))
        with open(os.path.join(migrate_root, 'tasks.txt'), 'w') as fp:
            for p in tasks:
                fp.write("%s\n" % p)

    if with_requirements:
        build_requirements()
    if with_dockerfile:
        build_dockerfile()
    if os.path.exists(migrate_root):
        build_migrations()
    if os.path.exists(migrate_root):
        build_tasks()
    click.echo(click.style("Generate done...", fg="yellow"))
def queue(celery_arguments):
    """Start the queue worker (work in progress).

    Discovers tasks on the app's configured Celery instance and hands the
    whitespace-split *celery_arguments* to ``worker_main`` as a worker
    command line. No-op (with a notice) when Celery is not configured.
    """
    if not app.celery:
        return click.echo(
            click.style('No celery config found,skip start...',
                        fg='yellow'))

    worker = app.celery
    worker.autodiscover_tasks()

    # worker_main expects argv[0] (program name) + subcommand + options.
    worker_argv = ['Queue', 'worker'] + celery_arguments.split()
    worker.worker_main(worker_argv)
def smart_database(app):
    """Best-effort database bootstrap.

    Parses ``SQLALCHEMY_DATABASE_URI`` from the app config and creates the
    database if it does not exist yet; otherwise does nothing.
    """
    from sqlalchemy.engine.url import make_url
    from sqlalchemy_utils import database_exists, create_database

    db_url = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
    if database_exists(db_url):
        return
    create_database(db_url)
def smart_migrate(app, migrations_root):
    """Run pending migrations when this node is the primary one.

    Upgrades the database only if *migrations_root* exists AND the
    ``FANTASY_PRIMARY_NODE`` env var is not ``'no'``. The env var is read
    only after the path check, preserving the original short-circuit
    (a missing variable raises KeyError only when the directory exists).
    """
    if not os.path.exists(migrations_root):
        return
    if os.environ['FANTASY_PRIMARY_NODE'] == 'no':
        return

    from flask_migrate import Migrate, upgrade as migrate_upgrade

    database = app.db
    extension = Migrate(app, database, directory=migrations_root)
    extension.init_app(app, database)
    migrate_upgrade(migrations_root)
def smart_account(app):
    """Wire up Flask-Security accounts from environment configuration.

    No-op when ``FANTASY_ACTIVE_ACCOUNT`` is ``'no'``. Otherwise resolves
    the user and role model classes named by ``FANTASY_ACCOUNT_MODEL`` and
    ``FANTASY_ROLE_MODEL`` (dotted ``module.Class`` paths) and initialises
    Flask-Security with an SQLAlchemy datastore. The security blueprint is
    registered unless ``FANTASY_ACCOUNT_SECURITY_MODE`` is ``'no'``.

    Raises KeyError when a required env var is missing (unchanged from the
    original contract).
    """
    if os.environ['FANTASY_ACTIVE_ACCOUNT'] == 'no':
        return

    from flask_security import SQLAlchemyUserDatastore, Security

    def _resolve_model(env_key):
        """Import and return the class named by the dotted path in env_key."""
        module_name, class_name = os.environ[env_key].rsplit('.', 1)
        return getattr(importlib.import_module(module_name), class_name)

    account_class = _resolve_model('FANTASY_ACCOUNT_MODEL')
    role_class = _resolve_model('FANTASY_ROLE_MODEL')

    # Direct boolean expression instead of `True if ... else False`.
    register_blueprint = os.environ['FANTASY_ACCOUNT_SECURITY_MODE'] != 'no'

    Security(app,
             SQLAlchemyUserDatastore(app.db, account_class, role_class),
             register_blueprint=register_blueprint)
def load_tasks(app, entry_file=None):
    """Load Celery tasks that autodiscovery fails to pick up.

    Reads ``migrations/tasks.txt`` (relative to *entry_file*'s directory)
    and imports each listed module's ``.tasks`` submodule inside the app
    context. Also installs a ``ContextTask`` base class so every task runs
    within the Flask application context.

    :param entry_file: path of the entry script used to locate the
        migrations directory. NOTE(review): the ``None`` default is not
        usable — ``os.path.dirname(None)`` raises TypeError; callers must
        pass a real path. Kept as-is to preserve the signature.

    Exits with status -1 when tasks.txt is missing.
    """
    from celery import Task

    tasks_txt = os.path.join(os.path.dirname(entry_file),
                             'migrations', 'tasks.txt')
    if not os.path.exists(tasks_txt):
        # Consistency fix: use click.echo like the rest of this file
        # (was a bare print plus a redundant local `import sys`).
        click.echo('Tasks file not found:%s' % tasks_txt)
        sys.exit(-1)

    class ContextTask(Task):
        """Task base class that wraps execution in the Flask app context."""
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return super().__call__(*args, **kwargs)

    app.celery.config_from_object(app.config, namespace='CELERY')
    app.celery.Task = ContextTask

    with app.app_context():
        with open(tasks_txt, 'r') as f:
            for line in f:
                mod = line.strip('\n')
                if mod:
                    importlib.import_module(mod + '.tasks')