Dataset columns (name: dtype, observed range):

  hexsha: stringlengths (40 to 40)
  size: int64 (4 to 1.02M)
  ext: stringclasses (8 values)
  lang: stringclasses (1 value)
  max_stars_repo_path: stringlengths (4 to 209)
  max_stars_repo_name: stringlengths (5 to 121)
  max_stars_repo_head_hexsha: stringlengths (40 to 40)
  max_stars_repo_licenses: listlengths (1 to 10)
  max_stars_count: int64 (1 to 191k)
  max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
  max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
  max_issues_repo_path: stringlengths (4 to 209)
  max_issues_repo_name: stringlengths (5 to 121)
  max_issues_repo_head_hexsha: stringlengths (40 to 40)
  max_issues_repo_licenses: listlengths (1 to 10)
  max_issues_count: int64 (1 to 67k)
  max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
  max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
  max_forks_repo_path: stringlengths (4 to 209)
  max_forks_repo_name: stringlengths (5 to 121)
  max_forks_repo_head_hexsha: stringlengths (40 to 40)
  max_forks_repo_licenses: listlengths (1 to 10)
  max_forks_count: int64 (1 to 105k)
  max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
  max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
  content: stringlengths (4 to 1.02M)
  avg_line_length: float64 (1.07 to 66.1k)
  max_line_length: int64 (4 to 266k)
  alphanum_fraction: float64 (0.01 to 1)
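Each row below is one source file with its repository metadata (star, issue and fork events) and the raw file text in `content`. As a rough illustration only, assuming the dump is exposed through the Hugging Face `datasets` library, a row could be inspected like this; the dataset identifier is a placeholder, not the real path:

import itertools
from datasets import load_dataset

# Hypothetical dataset path; substitute the actual one.
ds = load_dataset("some-org/python-code-files", split="train", streaming=True)

for row in itertools.islice(ds, 3):
    # Repo metadata plus the raw file text live side by side in each row.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["size"], row["max_line_length"])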
4c973e13e6a3ad8c93e05e6eb0d55b8605e8429e
65
py
Python
__init__.py
MahmoudRamada/clin
f56376ecaa590f4b06d01d6f80cfd574764b811e
[ "Apache-2.0" ]
null
null
null
__init__.py
MahmoudRamada/clin
f56376ecaa590f4b06d01d6f80cfd574764b811e
[ "Apache-2.0" ]
1
2019-10-06T17:56:23.000Z
2019-10-06T17:56:23.000Z
__init__.py
MahmoudRamada/clin
f56376ecaa590f4b06d01d6f80cfd574764b811e
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
from . import models
from . import wizard
21.666667
23
0.646154
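The trailing three fields of every row (avg_line_length, max_line_length, alphanum_fraction) are simple statistics derived from `content`. The exact definitions are an assumption, but the sketch below reproduces the values of the first row (size 65 over 3 lines gives 21.67, longest line 23, 42 alphanumeric characters out of 65 gives 0.646):

def content_stats(content):
    # Assumed definitions, consistent with the first row above:
    #   avg_line_length   = total characters (incl. newlines) / number of lines
    #   max_line_length   = length of the longest single line
    #   alphanum_fraction = alphanumeric characters / total characters
    lines = content.split("\n")
    size = len(content)
    avg_line_length = size / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(c.isalnum() for c in content) / size
    return avg_line_length, max_line_length, alphanum_fraction

example = "# -*- coding: utf-8 -*-\nfrom . import models\nfrom . import wizard"
print(content_stats(example))  # approximately (21.67, 23, 0.646)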
da97ceb3a62d8e4125edcbc326110377b5dacbf2
1,744
py
Python
reconcile/jenkins_job_cleaner.py
BumbleFeng/qontract-reconcile
a9f5bb221df5f92727d0582c5aa1a99822883782
[ "Apache-2.0" ]
21
2019-10-22T16:50:26.000Z
2022-03-25T16:07:44.000Z
reconcile/jenkins_job_cleaner.py
BumbleFeng/qontract-reconcile
a9f5bb221df5f92727d0582c5aa1a99822883782
[ "Apache-2.0" ]
629
2018-12-13T12:46:53.000Z
2022-03-31T20:46:02.000Z
reconcile/jenkins_job_cleaner.py
BumbleFeng/qontract-reconcile
a9f5bb221df5f92727d0582c5aa1a99822883782
[ "Apache-2.0" ]
50
2019-01-04T10:22:45.000Z
2022-03-28T13:24:27.000Z
import logging

from reconcile import queries
from reconcile.jenkins_job_builder import init_jjb
from reconcile.utils.jenkins_api import JenkinsApi

QONTRACT_INTEGRATION = 'jenkins-job-cleaner'


def get_managed_job_names(job_names, managed_projects):
    managed_jobs = set()
    for job_name in job_names:
        for managed_project in managed_projects:
            if job_name.startswith(managed_project):
                managed_jobs.add(job_name)
    return list(managed_jobs)


def get_desired_job_names(instance_name):
    jjb, _ = init_jjb()
    desired_jobs = \
        jjb.get_all_jobs(instance_name=instance_name,
                         include_test=True)[instance_name]
    return [j['name'] for j in desired_jobs]


def run(dry_run):
    jenkins_instances = queries.get_jenkins_instances()
    settings = queries.get_app_interface_settings()

    for instance in jenkins_instances:
        if instance.get('deleteMethod') != 'manual':
            continue
        managed_projects = instance.get('managedProjects')
        if not managed_projects:
            continue

        instance_name = instance['name']
        token = instance['token']
        jenkins = JenkinsApi(token, ssl_verify=False, settings=settings)
        all_job_names = jenkins.get_job_names()
        managed_job_names = \
            get_managed_job_names(all_job_names, managed_projects)
        desired_job_names = get_desired_job_names(instance_name)
        delete_job_names = [j for j in managed_job_names
                            if j not in desired_job_names]

        for job_name in delete_job_names:
            logging.info(['delete_job', instance_name, job_name])
            if not dry_run:
                jenkins.delete_job(job_name)
32.296296
72
0.681193
74e3ee009219a940ab1b9062b67597ed34ea0dcc
423
py
Python
openapi_core/__init__.py
niteoweb/openapi-core
ed39ee8fcf6a9e5c43411f891ec028a5f84bc0d8
[ "BSD-3-Clause" ]
null
null
null
openapi_core/__init__.py
niteoweb/openapi-core
ed39ee8fcf6a9e5c43411f891ec028a5f84bc0d8
[ "BSD-3-Clause" ]
null
null
null
openapi_core/__init__.py
niteoweb/openapi-core
ed39ee8fcf6a9e5c43411f891ec028a5f84bc0d8
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
"""OpenAPI core module"""
from openapi_core.shortcuts import (
    create_spec,
    validate_parameters,
    validate_body,
    validate_data,
)

__author__ = 'Artur Maciag'
__email__ = 'maciag.artur@gmail.com'
__version__ = '0.9.0'
__url__ = 'https://github.com/p1c2u/openapi-core'
__license__ = 'BSD 3-Clause License'

__all__ = [
    'create_spec',
    'validate_parameters',
    'validate_body',
    'validate_data',
]
26.4375
75
0.72104
74de198630183245b7e958eb55d166e49beb01ee
3,305
py
Python
scripts/create_fluseverity_figs_v5/ILINet_zRR_time_v5.py
eclee25/flu-SDI-exploratory-age
2f5a4d97b84d2116e179e85fe334edf4556aa946
[ "MIT" ]
3
2018-03-29T23:02:43.000Z
2020-08-10T12:01:50.000Z
scripts/create_fluseverity_figs_v5/ILINet_zRR_time_v5.py
eclee25/flu-SDI-exploratory-age
2f5a4d97b84d2116e179e85fe334edf4556aa946
[ "MIT" ]
null
null
null
scripts/create_fluseverity_figs_v5/ILINet_zRR_time_v5.py
eclee25/flu-SDI-exploratory-age
2f5a4d97b84d2116e179e85fe334edf4556aa946
[ "MIT" ]
null
null
null
#!/usr/bin/python

##############################################
###Python template
###Author: Elizabeth Lee
###Date: 11/4/14
###Function: RR of incidence in adults to incidence in children vs. week number normalized by the first 'gp_normweeks' of the season. Incidence in children and adults is normalized by the size of the child and adult populations in the second calendar year of the flu season. ILINet data
# 11/4 convert to v5: covCare adjustment, RR, a:c
# 7/21/15: update notation
# 7/24/15: update CDC notation

###Import data: CDC_Source/Import_Data/all_cdc_source_data.csv, Census/Import_Data/totalpop_age_Census_98-14.csv

###Command Line: python ILINet_zRR_time_v5.py
##############################################

### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
# 2013-14 ILINet data is normalized by estimated population size from December 2013 because 2014 estimates are not available at this time

### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np

## local modules ##
import functions_v5 as fxn

### data structures ###

### functions ###

### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
incidin.readline() # remove header
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
pop = csv.reader(popin, delimiter=',')

### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_ILINet_seasonlabels
colvec = fxn.gp_ILINet_colors
wklab = fxn.gp_weeklabels
norm = fxn.gp_normweeks
fs = 24
fssml = 16

### program ###
# import data
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.ILINet_week_RR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)
d_indices = fxn.identify_retro_early_weeks(d_wk, d_totIncidAdj53ls)

# plot values
fig = plt.figure()
ax = plt.subplot(111)

for s, i in zip(ps, xrange(len(ps))):
    ax.plot(xrange(fw), d_zRR53ls[s][:fw], marker = fxn.gp_marker, color = colvec[i], label = sl[i], linewidth = fxn.gp_linewidth)

for s in ps:
    beg_retro, end_retro = d_indices[(s, 'r')]
    beg_early, end_early = d_indices[(s, 'e')]
    plt.plot(range(beg_retro, end_retro), d_zRR53ls[s][beg_retro:end_retro], marker = 'o', color = fxn.gp_retro_early_colors[0], linewidth = 2)
    plt.plot(range(beg_early, end_early), d_zRR53ls[s][beg_early:end_early], marker = 'o', color = fxn.gp_retro_early_colors[1], linewidth = 2)
    print np.mean(d_zRR53ls[s][beg_retro:end_retro])

plt.xlim([0, fw-1])
plt.xticks(range(fw)[::5], wklab[:fw:5])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel(fxn.gp_sigmat_cdc, fontsize=fs)

# shrink current axis by 10%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width*0.9, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))

plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission2/SIFigures/ILINet-figs/ILINet_zRR_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
43.486842
286
0.737065
ce45016cddeed7ea65a507dc5b57a4d84f92d820
546
py
Python
finch/wsgi.py
Zeitsperre/finch
1660b7cbb5be80d3c92b8589a5b2a15aaaf0e789
[ "Apache-2.0" ]
null
null
null
finch/wsgi.py
Zeitsperre/finch
1660b7cbb5be80d3c92b8589a5b2a15aaaf0e789
[ "Apache-2.0" ]
null
null
null
finch/wsgi.py
Zeitsperre/finch
1660b7cbb5be80d3c92b8589a5b2a15aaaf0e789
[ "Apache-2.0" ]
1
2019-08-18T02:59:05.000Z
2019-08-18T02:59:05.000Z
import os

import sentry_sdk
from pywps.app.Service import Service

from .processes import processes

if os.environ.get("SENTRY_DSN"):
    sentry_sdk.init(os.environ["SENTRY_DSN"])


def create_app(cfgfiles=None):
    config_files = [os.path.join(os.path.dirname(__file__), 'default.cfg')]
    if cfgfiles:
        config_files.extend(cfgfiles)
    if 'PYWPS_CFG' in os.environ:
        config_files.append(os.environ['PYWPS_CFG'])
    service = Service(processes=processes, cfgfiles=config_files)
    return service


application = create_app()
23.73913
75
0.730769
11bccbb97dc7b8b674c5735d6cf3a829bbb063b1
4,915
py
Python
ironic/tests/unit/objects/utils.py
ericxiett/ironic-customized
3a2ad13969e1497889a0c3be80f9f5f671ff4d1b
[ "Apache-2.0" ]
null
null
null
ironic/tests/unit/objects/utils.py
ericxiett/ironic-customized
3a2ad13969e1497889a0c3be80f9f5f671ff4d1b
[ "Apache-2.0" ]
null
null
null
ironic/tests/unit/objects/utils.py
ericxiett/ironic-customized
3a2ad13969e1497889a0c3be80f9f5f671ff4d1b
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic object test utilities."""

import six

from ironic.common import exception
from ironic.common.i18n import _
from ironic import objects
from ironic.tests.unit.db import utils as db_utils


def check_keyword_arguments(func):
    @six.wraps(func)
    def wrapper(**kw):
        obj_type = kw.pop('object_type')
        result = func(**kw)

        extra_args = set(kw) - set(result)
        if extra_args:
            raise exception.InvalidParameterValue(
                _("Unknown keyword arguments (%(extra)s) were passed "
                  "while creating a test %(object_type)s object.") %
                {"extra": ", ".join(extra_args),
                 "object_type": obj_type})

        return result

    return wrapper


def get_test_node(ctxt, **kw):
    """Return a Node object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    kw['object_type'] = 'node'
    get_db_node_checked = check_keyword_arguments(db_utils.get_test_node)
    db_node = get_db_node_checked(**kw)

    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del db_node['id']
    node = objects.Node(ctxt)
    for key in db_node:
        setattr(node, key, db_node[key])
    return node


def create_test_node(ctxt, **kw):
    """Create and return a test node object.

    Create a node in the DB and return a Node object with appropriate
    attributes.
    """
    node = get_test_node(ctxt, **kw)
    node.create()
    return node


def get_test_port(ctxt, **kw):
    """Return a Port object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    kw['object_type'] = 'port'
    get_db_port_checked = check_keyword_arguments(
        db_utils.get_test_port)
    db_port = get_db_port_checked(**kw)

    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del db_port['id']
    port = objects.Port(ctxt)
    for key in db_port:
        setattr(port, key, db_port[key])
    return port


def create_test_port(ctxt, **kw):
    """Create and return a test port object.

    Create a port in the DB and return a Port object with appropriate
    attributes.
    """
    port = get_test_port(ctxt, **kw)
    port.create()
    return port


def get_test_chassis(ctxt, **kw):
    """Return a Chassis object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    kw['object_type'] = 'chassis'
    get_db_chassis_checked = check_keyword_arguments(
        db_utils.get_test_chassis)
    db_chassis = get_db_chassis_checked(**kw)

    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del db_chassis['id']
    chassis = objects.Chassis(ctxt)
    for key in db_chassis:
        setattr(chassis, key, db_chassis[key])
    return chassis


def create_test_chassis(ctxt, **kw):
    """Create and return a test chassis object.

    Create a chassis in the DB and return a Chassis object with appropriate
    attributes.
    """
    chassis = get_test_chassis(ctxt, **kw)
    chassis.create()
    return chassis


def get_test_portgroup(ctxt, **kw):
    """Return a Portgroup object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    kw['object_type'] = 'portgroup'
    get_db_port_group_checked = check_keyword_arguments(
        db_utils.get_test_portgroup)
    db_portgroup = get_db_port_group_checked(**kw)

    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del db_portgroup['id']
    portgroup = objects.Portgroup(ctxt)
    for key in db_portgroup:
        setattr(portgroup, key, db_portgroup[key])
    return portgroup


def create_test_portgroup(ctxt, **kw):
    """Create and return a test portgroup object.

    Create a portgroup in the DB and return a Portgroup object with
    appropriate attributes.
    """
    portgroup = get_test_portgroup(ctxt, **kw)
    portgroup.create()
    return portgroup
29.969512
79
0.675687
7c024cbdfc975d84eb017252c3b7d70b65bb7841
1,035
py
Python
aether-client-library/aether/client/exceptions.py
eHealthAfrica/aether
6845d7eeebd4ae57332f73d74db3617e00032204
[ "Apache-2.0" ]
14
2018-08-09T20:57:16.000Z
2020-10-11T12:22:18.000Z
aether-client-library/aether/client/exceptions.py
eHealthAfrica/aether
6845d7eeebd4ae57332f73d74db3617e00032204
[ "Apache-2.0" ]
148
2018-07-24T10:52:29.000Z
2022-02-10T09:06:44.000Z
aether-client-library/aether/client/exceptions.py
eHealthAfrica/aether
6845d7eeebd4ae57332f73d74db3617e00032204
[ "Apache-2.0" ]
6
2018-07-25T13:33:10.000Z
2019-09-23T03:02:09.000Z
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


# An Exception Class to wrap all handled API exceptions
class AetherAPIException(Exception):

    def __init__(self, *args, **kwargs):
        msg = {k: v for k, v in kwargs.items()}
        for k, v in kwargs.items():
            setattr(self, k, v)
        super(AetherAPIException, self).__init__(msg)
38.333333
75
0.72657
3536896a5fb6a68cf1bf3c27ea1f09366e3331c4
9,890
py
Python
frappe/website/doctype/blog_post/blog_post.py
Don-Leopardo/frappe
39097b05a7a9904776a435ee2c3d7a579d429389
[ "MIT" ]
3,755
2015-01-06T07:47:43.000Z
2022-03-31T20:54:23.000Z
frappe/website/doctype/blog_post/blog_post.py
Don-Leopardo/frappe
39097b05a7a9904776a435ee2c3d7a579d429389
[ "MIT" ]
7,369
2015-01-01T19:59:41.000Z
2022-03-31T23:02:05.000Z
frappe/website/doctype/blog_post/blog_post.py
Don-Leopardo/frappe
39097b05a7a9904776a435ee2c3d7a579d429389
[ "MIT" ]
2,685
2015-01-07T17:51:03.000Z
2022-03-31T23:16:24.000Z
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: MIT. See LICENSE import frappe from frappe import _ from frappe.website.website_generator import WebsiteGenerator from frappe.website.utils import clear_cache from frappe.utils import today, cint, global_date_format, get_fullname, strip_html_tags, markdown, sanitize_html from math import ceil from frappe.website.utils import (find_first_image, get_html_content_based_on_type, get_comment_list) class BlogPost(WebsiteGenerator): @frappe.whitelist() def make_route(self): if not self.route: return frappe.db.get_value('Blog Category', self.blog_category, 'route') + '/' + self.scrub(self.title) def get_feed(self): return self.title def validate(self): super(BlogPost, self).validate() if not self.blog_intro: content = get_html_content_based_on_type(self, 'content', self.content_type) self.blog_intro = content[:200] self.blog_intro = strip_html_tags(self.blog_intro) if self.blog_intro: self.blog_intro = self.blog_intro[:200] if not self.meta_title: self.meta_title = self.title[:60] else: self.meta_title = self.meta_title[:60] if not self.meta_description: self.meta_description = self.blog_intro[:140] else: self.meta_description = self.meta_description[:140] if self.published and not self.published_on: self.published_on = today() if self.featured: if not self.meta_image: frappe.throw(_("A featured post must have a cover image")) self.reset_featured_for_other_blogs() self.set_read_time() def reset_featured_for_other_blogs(self): all_posts = frappe.get_all("Blog Post", {"featured": 1}) for post in all_posts: frappe.db.set_value("Blog Post", post.name, "featured", 0) def on_update(self): super(BlogPost, self).on_update() clear_cache("writers") def on_trash(self): super(BlogPost, self).on_trash() def get_context(self, context): # this is for double precaution. 
usually it wont reach this code if not published if not cint(self.published): raise Exception("This blog has not been published yet!") context.no_breadcrumbs = True # temp fields context.full_name = get_fullname(self.owner) context.updated = global_date_format(self.published_on) context.social_links = self.fetch_social_links_info() context.cta = self.fetch_cta() context.enable_cta = not self.hide_cta and frappe.db.get_single_value("Blog Settings", "show_cta_in_blog", cache=True) if self.blogger: context.blogger_info = frappe.get_doc("Blogger", self.blogger).as_dict() context.author = self.blogger context.content = get_html_content_based_on_type(self, 'content', self.content_type) #if meta description is not present, then blog intro or first 140 characters of the blog will be set as description context.description = self.meta_description or self.blog_intro or strip_html_tags(context.content[:140]) context.metatags = { "name": self.meta_title, "description": context.description, } #if meta image is not present, then first image inside the blog will be set as the meta image image = find_first_image(context.content) context.metatags["image"] = self.meta_image or image or None self.load_comments(context) self.load_feedback(context) context.category = frappe.db.get_value("Blog Category", context.doc.blog_category, ["title", "route"], as_dict=1) context.parents = [{"name": _("Home"), "route":"/"}, {"name": "Blog", "route": "/blog"}, {"label": context.category.title, "route":context.category.route}] context.guest_allowed = frappe.db.get_single_value("Blog Settings", "allow_guest_to_comment") def fetch_cta(self): if frappe.db.get_single_value("Blog Settings", "show_cta_in_blog", cache=True): blog_settings = frappe.get_cached_doc("Blog Settings") return { "show_cta_in_blog": 1, "title": blog_settings.title, "subtitle": blog_settings.subtitle, "cta_label": blog_settings.cta_label, "cta_url": blog_settings.cta_url } return {} def fetch_social_links_info(self): if not frappe.db.get_single_value("Blog Settings", "enable_social_sharing", cache=True): return [] url = frappe.local.site + "/" +self.route social_links = [ { "icon": "twitter", "link": "https://twitter.com/intent/tweet?text=" + self.title + "&url=" + url }, { "icon": "facebook", "link": "https://www.facebook.com/sharer.php?u=" + url }, { "icon": "linkedin", "link": "https://www.linkedin.com/sharing/share-offsite/?url=" + url }, { "icon": "envelope", "link": "mailto:?subject=" + self.title + "&body=" + url } ] return social_links def load_comments(self, context): context.comment_list = get_comment_list(self.doctype, self.name) if not context.comment_list: context.comment_text = 0 else: context.comment_text = len(context.comment_list) def load_feedback(self, context): user = frappe.session.user feedback = frappe.get_all('Feedback', fields=['like'], filters=dict( reference_doctype=self.doctype, reference_name=self.name, ip_address=frappe.local.request_ip, owner=user ) ) like_count = 0 if frappe.db.count('Feedback'): like_count = frappe.db.count('Feedback', filters = dict( reference_doctype = self.doctype, reference_name = self.name, like = True ) ) context.user_feedback = feedback[0] if feedback else '' context.like_count = like_count def set_read_time(self): content = self.content or self.content_html or '' if self.content_type == "Markdown": content = markdown(self.content_md) total_words = len(strip_html_tags(content).split()) self.read_time = ceil(total_words/250) def get_list_context(context=None): list_context = frappe._dict( get_list = 
get_blog_list, no_breadcrumbs = True, hide_filters = True, children = get_children(), # show_search = True, title = _('Blog') ) category = frappe.utils.escape_html(frappe.local.form_dict.blog_category or frappe.local.form_dict.category) if category: category_title = get_blog_category(category) list_context.sub_title = _("Posts filed under {0}").format(category_title) list_context.title = category_title elif frappe.local.form_dict.blogger: blogger = frappe.db.get_value("Blogger", {"name": frappe.local.form_dict.blogger}, "full_name") list_context.sub_title = _("Posts by {0}").format(blogger) list_context.title = blogger elif frappe.local.form_dict.txt: list_context.sub_title = _('Filtered by "{0}"').format(sanitize_html(frappe.local.form_dict.txt)) if list_context.sub_title: list_context.parents = [{"name": _("Home"), "route": "/"}, {"name": "Blog", "route": "/blog"}] else: list_context.parents = [{"name": _("Home"), "route": "/"}] list_context.update(frappe.get_doc("Blog Settings").as_dict(no_default_fields=True)) return list_context def get_children(): return frappe.db.sql("""select route as name, title from `tabBlog Category` where published = 1 and exists (select name from `tabBlog Post` where `tabBlog Post`.blog_category=`tabBlog Category`.name and published=1) order by title asc""", as_dict=1) def clear_blog_cache(): for blog in frappe.db.sql_list("""select route from `tabBlog Post` where ifnull(published,0)=1"""): clear_cache(blog) clear_cache("writers") def get_blog_category(route): return frappe.db.get_value("Blog Category", {"name": route}, "title") or route def get_blog_list(doctype, txt=None, filters=None, limit_start=0, limit_page_length=20, order_by=None): conditions = [] category = filters.blog_category or frappe.utils.escape_html(frappe.local.form_dict.blog_category or frappe.local.form_dict.category) if filters: if filters.blogger: conditions.append('t1.blogger=%s' % frappe.db.escape(filters.blogger)) if category: conditions.append('t1.blog_category=%s' % frappe.db.escape(category)) if txt: conditions.append('(t1.content like {0} or t1.title like {0}")'.format(frappe.db.escape('%' + txt + '%'))) if conditions: frappe.local.no_cache = 1 query = """\ select t1.title, t1.name, t1.blog_category, t1.route, t1.published_on, t1.read_time, t1.published_on as creation, t1.read_time as read_time, t1.featured as featured, t1.meta_image as cover_image, t1.content as content, t1.content_type as content_type, t1.content_html as content_html, t1.content_md as content_md, ifnull(t1.blog_intro, t1.content) as intro, t2.full_name, t2.avatar, t1.blogger, (select count(name) from `tabComment` where comment_type='Comment' and reference_doctype='Blog Post' and reference_name=t1.name) as comments from `tabBlog Post` t1, `tabBlogger` t2 where ifnull(t1.published,0)=1 and t1.blogger = t2.name %(condition)s order by featured desc, published_on desc, name asc limit %(page_len)s OFFSET %(start)s""" % { "start": limit_start, "page_len": limit_page_length, "condition": (" and " + " and ".join(conditions)) if conditions else "" } posts = frappe.db.sql(query, as_dict=1) for post in posts: post.content = get_html_content_based_on_type(post, 'content', post.content_type) if not post.cover_image: post.cover_image = find_first_image(post.content) post.published = global_date_format(post.creation) post.content = strip_html_tags(post.content) if not post.comments: post.comment_text = _('No comments yet') elif post.comments==1: post.comment_text = _('1 comment') else: post.comment_text = _('{0} 
comments').format(str(post.comments)) post.avatar = post.avatar or "" post.category = frappe.db.get_value('Blog Category', post.blog_category, ['name', 'route', 'title'], as_dict=True) if post.avatar and (not "http:" in post.avatar and not "https:" in post.avatar) and not post.avatar.startswith("/"): post.avatar = "/" + post.avatar return posts
32.966667
134
0.720829
e7fa21b350c0b53965c881f1123b33d8397bc899
484
py
Python
tests/test_xml.py
relevitt/SoCo
aeffc02d11dbfc60e4589c473a3a528abaceea0a
[ "MIT" ]
1,149
2015-01-02T02:08:34.000Z
2022-03-30T13:58:04.000Z
tests/test_xml.py
relevitt/SoCo
aeffc02d11dbfc60e4589c473a3a528abaceea0a
[ "MIT" ]
630
2015-01-01T10:44:22.000Z
2022-03-17T00:25:55.000Z
tests/test_xml.py
relevitt/SoCo
aeffc02d11dbfc60e4589c473a3a528abaceea0a
[ "MIT" ]
249
2015-01-07T20:11:10.000Z
2022-03-14T05:54:20.000Z
"""Tests for the xml module.""" from soco import xml def test_ns_tag(): """Test the ns_tag function.""" namespaces = [ "http://purl.org/dc/elements/1.1/", "urn:schemas-upnp-org:metadata-1-0/upnp/", "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/", ] for ns_in, namespace in zip(["dc", "upnp", ""], namespaces): res = xml.ns_tag(ns_in, "testtag") correct = "{{{}}}{}".format(namespace, "testtag") assert res == correct
26.888889
64
0.57438
66f928da14da2a7dd7f0712cbfcc9a2cea3b77ae
864
py
Python
sourcecode/13/13.7.3/flexGridSizer.py
ydong08/PythonCode
268f538608917565cdfc8eadbd04101606314c97
[ "MIT" ]
null
null
null
sourcecode/13/13.7.3/flexGridSizer.py
ydong08/PythonCode
268f538608917565cdfc8eadbd04101606314c97
[ "MIT" ]
null
null
null
sourcecode/13/13.7.3/flexGridSizer.py
ydong08/PythonCode
268f538608917565cdfc8eadbd04101606314c97
[ "MIT" ]
null
null
null
#!/usr/bin/python
# -*- coding: UTF-8 -*-

import wx


class GridSizerFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, -1, u"布局管理器-gridSizer", size = (300, 150))
        panel = wx.Panel(self, -1)
        colorList = [u'红', u'蓝', u'绿', u'黄', u'黑']
        sizer = wx.FlexGridSizer(rows = 3, cols=3, hgap=5, vgap=5)
        for color in colorList:
            btn = wx.Button(panel, -1, color)
            sizer.Add(btn, 0, 0)
        btn = wx.Button(panel, -1, u'紫')
        btn.SetMinSize((100,40))
        sizer.Add(btn, 0, 0)
        btn = wx.Button(panel, -1, u'白')
        btn.SetMinSize((50,30))
        sizer.Add(btn, 0, 0)
        panel.SetSizer(sizer)
        panel.Fit()
        help(wx.ComboBox.__init__)


if __name__ == '__main__':
    app = wx.PySimpleApp()
    frame = GridSizerFrame()
    frame.Show()
    app.MainLoop()
29.793103
80
0.543981
6f43c0eab27a42be90ab352569ae7cdfea239f41
928
py
Python
appendices/xunit/test_xUnit_fixtures.py
Jamrozinski/PythonTestingWithPytest
0dceb58f0b17fefa776748c93f5df062395d00be
[ "MIT" ]
11
2021-05-06T12:39:39.000Z
2022-03-14T11:58:44.000Z
appendices/xunit/test_xUnit_fixtures.py
Jamrozinski/PythonTestingWithPytest
0dceb58f0b17fefa776748c93f5df062395d00be
[ "MIT" ]
34
2019-12-16T16:53:24.000Z
2022-01-13T02:29:30.000Z
appendices/xunit/test_xUnit_fixtures.py
Jamrozinski/PythonTestingWithPytest
0dceb58f0b17fefa776748c93f5df062395d00be
[ "MIT" ]
11
2021-06-10T21:19:42.000Z
2022-02-21T04:03:06.000Z
def setup_module(module):
    print(f'\nsetup_module() for {module.__name__}')


def teardown_module(module):
    print(f'teardown_module() for {module.__name__}')


def setup_function(function):
    print(f'setup_function() for {function.__name__}')


def teardown_function(function):
    print(f'teardown_function() for {function.__name__}')


def test_1():
    print('test_1()')


def test_2():
    print('test_2()')


class TestClass:

    @classmethod
    def setup_class(cls):
        print(f'setup_class() for class {cls.__name__}')

    @classmethod
    def teardown_class(cls):
        print(f'teardown_class() for {cls.__name__}')

    def setup_method(self, method):
        print(f'setup_method() for {method.__name__}')

    def teardown_method(self, method):
        print(f'teardown_method() for {method.__name__}')

    def test_3(self):
        print('test_3()')

    def test_4(self):
        print('test_4()')
20.622222
57
0.657328
e83714b7954846dd1594901a6f4aef2647778085
12,074
py
Python
venv/Lib/site-packages/skimage/registration/_phase_cross_correlation.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
4
2021-10-20T12:39:09.000Z
2022-02-26T15:02:08.000Z
venv/Lib/site-packages/skimage/registration/_phase_cross_correlation.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
null
null
null
venv/Lib/site-packages/skimage/registration/_phase_cross_correlation.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
20
2021-11-07T13:55:56.000Z
2021-12-02T10:54:01.000Z
""" Port of Manuel Guizar's code from: http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation """ import numpy as np from .._shared.fft import fftmodule as fft from ._masked_phase_cross_correlation import _masked_phase_cross_correlation def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None): """ Upsampled DFT by matrix multiplication. This code is intended to provide the same result as if the following operations were performed: - Embed the array "data" in an array that is ``upsample_factor`` times larger in each dimension. ifftshift to bring the center of the image to (1,1). - Take the FFT of the larger array. - Extract an ``[upsampled_region_size]`` region of the result, starting with the ``[axis_offsets+1]`` element. It achieves this result by computing the DFT in the output array without the need to zeropad. Much faster and memory efficient than the zero-padded FFT approach if ``upsampled_region_size`` is much smaller than ``data.size * upsample_factor``. Parameters ---------- data : array The input data array (DFT of original data) to upsample. upsampled_region_size : integer or tuple of integers, optional The size of the region to be sampled. If one integer is provided, it is duplicated up to the dimensionality of ``data``. upsample_factor : integer, optional The upsampling factor. Defaults to 1. axis_offsets : tuple of integers, optional The offsets of the region to be sampled. Defaults to None (uses image center) Returns ------- output : ndarray The upsampled DFT of the specified region. """ # if people pass in an integer, expand it to a list of equal-sized sections if not hasattr(upsampled_region_size, "__iter__"): upsampled_region_size = [upsampled_region_size, ] * data.ndim else: if len(upsampled_region_size) != data.ndim: raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.") if axis_offsets is None: axis_offsets = [0, ] * data.ndim else: if len(axis_offsets) != data.ndim: raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.") im2pi = 1j * 2 * np.pi dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets)) for (n_items, ups_size, ax_offset) in dim_properties[::-1]: kernel = ((np.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq(n_items, upsample_factor)) kernel = np.exp(-im2pi * kernel) # Equivalent to: # data[i, j, k] = kernel[i, :] @ data[j, k].T data = np.tensordot(kernel, data, axes=(1, -1)) return data def _compute_phasediff(cross_correlation_max): """ Compute global phase difference between the two images (should be zero if images are non-negative). Parameters ---------- cross_correlation_max : complex The complex value of the cross correlation at its maximum point. """ return np.arctan2(cross_correlation_max.imag, cross_correlation_max.real) def _compute_error(cross_correlation_max, src_amp, target_amp): """ Compute RMS error metric between ``src_image`` and ``target_image``. Parameters ---------- cross_correlation_max : complex The complex value of the cross correlation at its maximum point. 
src_amp : float The normalized average image intensity of the source image target_amp : float The normalized average image intensity of the target image """ error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\ (src_amp * target_amp) return np.sqrt(np.abs(error)) def phase_cross_correlation(reference_image, moving_image, *, upsample_factor=1, space="real", return_error=True, reference_mask=None, moving_mask=None, overlap_ratio=0.3): """Efficient subpixel image translation registration by cross-correlation. This code gives the same precision as the FFT upsampled cross-correlation in a fraction of the computation time and with reduced memory requirements. It obtains an initial estimate of the cross-correlation peak by an FFT and then refines the shift estimation by upsampling the DFT only in a small neighborhood of that estimate by means of a matrix-multiply DFT. Parameters ---------- reference_image : array Reference image. moving_image : array Image to register. Must be same dimensionality as ``reference_image``. upsample_factor : int, optional Upsampling factor. Images will be registered to within ``1 / upsample_factor`` of a pixel. For example ``upsample_factor == 20`` means the images will be registered within 1/20th of a pixel. Default is 1 (no upsampling). Not used if any of ``reference_mask`` or ``moving_mask`` is not None. space : string, one of "real" or "fourier", optional Defines how the algorithm interprets input data. "real" means data will be FFT'd to compute the correlation, while "fourier" data will bypass FFT of input data. Case insensitive. Not used if any of ``reference_mask`` or ``moving_mask`` is not None. return_error : bool, optional Returns error and phase difference if on, otherwise only shifts are returned. Has noeffect if any of ``reference_mask`` or ``moving_mask`` is not None. In this case only shifts is returned. reference_mask : ndarray Boolean mask for ``reference_image``. The mask should evaluate to ``True`` (or 1) on valid pixels. ``reference_mask`` should have the same shape as ``reference_image``. moving_mask : ndarray or None, optional Boolean mask for ``moving_image``. The mask should evaluate to ``True`` (or 1) on valid pixels. ``moving_mask`` should have the same shape as ``moving_image``. If ``None``, ``reference_mask`` will be used. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Used only if one of ``reference_mask`` or ``moving_mask`` is None. Returns ------- shifts : ndarray Shift vector (in pixels) required to register ``moving_image`` with ``reference_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X) error : float Translation invariant normalized RMS error between ``reference_image`` and ``moving_image``. phasediff : float Global phase difference between the two images (should be zero if images are non-negative). References ---------- .. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup, "Efficient subpixel image registration algorithms," Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156` .. [2] James R. Fienup, "Invariant error metrics for image reconstruction" Optics Letters 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352` .. [3] Dirk Padfield. 
Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [4] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ if (reference_mask is not None) or (moving_mask is not None): return _masked_phase_cross_correlation(reference_image, moving_image, reference_mask, moving_mask, overlap_ratio) # images must be the same shape if reference_image.shape != moving_image.shape: raise ValueError("images must be same shape") # assume complex data is already in Fourier space if space.lower() == 'fourier': src_freq = reference_image target_freq = moving_image # real data needs to be fft'd. elif space.lower() == 'real': src_freq = fft.fftn(reference_image) target_freq = fft.fftn(moving_image) else: raise ValueError('space argument must be "real" of "fourier"') # Whole-pixel shift - Compute cross-correlation by an IFFT shape = src_freq.shape image_product = src_freq * target_freq.conj() cross_correlation = fft.ifftn(image_product) # Locate maximum maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape) midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape]) shifts = np.stack(maxima).astype(np.float64) shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints] if upsample_factor == 1: if return_error: src_amp = np.sum(np.real(src_freq * src_freq.conj())) src_amp /= src_freq.size target_amp = np.sum(np.real(target_freq * target_freq.conj())) target_amp /= target_freq.size CCmax = cross_correlation[maxima] # If upsampling > 1, then refine estimate with matrix multiply DFT else: # Initial shift estimate in upsampled grid shifts = np.round(shifts * upsample_factor) / upsample_factor upsampled_region_size = np.ceil(upsample_factor * 1.5) # Center of output array at dftshift + 1 dftshift = np.fix(upsampled_region_size / 2.0) upsample_factor = np.array(upsample_factor, dtype=np.float64) # Matrix multiply DFT around the current shift estimate sample_region_offset = dftshift - shifts*upsample_factor cross_correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, upsample_factor, sample_region_offset).conj() # Locate maximum and map back to original pixel grid maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape) CCmax = cross_correlation[maxima] maxima = np.stack(maxima).astype(np.float64) - dftshift shifts = shifts + maxima / upsample_factor if return_error: src_amp = np.sum(np.real(src_freq * src_freq.conj())) target_amp = np.sum(np.real(target_freq * target_freq.conj())) # If its only one row or column the shift along that dimension has no # effect. We set to zero. for dim in range(src_freq.ndim): if shape[dim] == 1: shifts[dim] = 0 if return_error: # Redirect user to masked_phase_cross_correlation if NaNs are observed if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp): raise ValueError( "NaN values found, please remove NaNs from your " "input data or use the `reference_mask`/`moving_mask` " "keywords, eg: " "phase_cross_correlation(reference_image, moving_image, " "reference_mask=~np.isnan(reference_image), " "moving_mask=~np.isnan(moving_image))") return shifts, _compute_error(CCmax, src_amp, target_amp),\ _compute_phasediff(CCmax) else: return shifts
43.588448
116
0.64817
13c23d851c5c95c0b268aa2fc1e59e058ce6122a
544
py
Python
app/api/routes.py
marirs/flask-api-with-jwt
0d7ffde0ea16566db65f9f521d7a9bf755d1d20b
[ "MIT" ]
null
null
null
app/api/routes.py
marirs/flask-api-with-jwt
0d7ffde0ea16566db65f9f521d7a9bf755d1d20b
[ "MIT" ]
null
null
null
app/api/routes.py
marirs/flask-api-with-jwt
0d7ffde0ea16566db65f9f521d7a9bf755d1d20b
[ "MIT" ]
null
null
null
""" API Routes Desc: Skeleton app to access API's with JWT tokens Author: [marirs,] version: 1.0 """ import os from flask import Blueprint, request, jsonify from app import db from app.auth.routes import requires_token from uuid import uuid4 api = Blueprint('api', __name__) @api.route('/', methods=['POST']) @requires_token def api_home(current_user, roles): """ Sample Home protected API endpoint :param current_user: :param roles: :return: """ return jsonify({'message': 'api home', 'status': 'success'}), 200
20.923077
69
0.693015
cbc7e5061d720dbd72fd970fc1afe37443665941
647
py
Python
djangobench/benchmarks/url_resolve_flat/benchmark.py
smithdc1/djangobench
912dc536db706fb73d24a53d10d1739a0824d2c1
[ "BSD-3-Clause" ]
null
null
null
djangobench/benchmarks/url_resolve_flat/benchmark.py
smithdc1/djangobench
912dc536db706fb73d24a53d10d1739a0824d2c1
[ "BSD-3-Clause" ]
null
null
null
djangobench/benchmarks/url_resolve_flat/benchmark.py
smithdc1/djangobench
912dc536db706fb73d24a53d10d1739a0824d2c1
[ "BSD-3-Clause" ]
null
null
null
try:
    from django.urls import Resolver404, resolve
except ImportError:  # Django < 1.10
    from django.core.urlresolvers import resolve, Resolver404

from djangobench.utils import run_benchmark


def benchmark():
    paths = (
        '/user/repo/feature19',
        '/section0/feature0',
        '/en/feature10',
        '/ru/feature10',
        '/missing',
    )
    for i in range(0, 100):
        for path in paths:
            try:
                resolve(path)
            except Resolver404:
                pass


run_benchmark(
    benchmark,
    meta={
        'description': 'URL resolution with long-flat list of patterns.',
    }
)
22.310345
73
0.579598
126af78a21306e3a47126aee41a01d5f3c8e06e4
3,891
py
Python
sdk/env.py
Gussy/titanium_desktop
37dbaab5664e595115e2fcdc348ed125cd50b48d
[ "Apache-2.0" ]
3
2020-01-31T03:40:26.000Z
2022-02-06T12:20:52.000Z
sdk/env.py
appcelerator-archive/titanium_desktop
37dbaab5664e595115e2fcdc348ed125cd50b48d
[ "Apache-2.0" ]
null
null
null
sdk/env.py
appcelerator-archive/titanium_desktop
37dbaab5664e595115e2fcdc348ed125cd50b48d
[ "Apache-2.0" ]
null
null
null
import app
import osx_app
import linux_app
import win32_app
import os
import platform
import subprocess
import sys
import os.path as p
import __init__


class PackagingEnvironment(object):
    def __init__(self, target_os, packaging_server=False, appstore=False):
        self.components_dir = None
        self.version = __init__.get_titanium_version()
        self.excludes = ['.pdb', '.exp', '.ilk', '.lib', '.svn', '.git',
            '.gitignore', '.cvsignore']

        # Do not include WebKit for app store builds.
        if appstore:
            self.excludes.extend([
                'WebKit.framework',
                'WebCore.framework',
                'JavaScriptCore.framework'])

        self.target_os = target_os
        script_dir = p.abspath(p.dirname(sys._getframe(0).f_code.co_filename))
        if packaging_server:
            self.init_packaging_server_dirs(script_dir)
        else:
            self.init_normal_dirs(script_dir)

    def init_packaging_server_dirs(self, script_dir):
        self.install_dirs = [p.join(script_dir, '..', '..', '..')]

    def init_normal_dirs(self, script_dir):
        if (self.target_os == 'linux'):
            self.install_dirs = [
                p.expanduser('~/.titanium'),
                "/opt/titanium",
                "/usr/local/lib/titanium",
                "/usr/lib/titanium"
            ]
        elif (self.target_os == 'osx'):
            self.install_dirs = [
                p.expanduser('~/Library/Application Support/Titanium'),
                '/Library/Application Support/Titanium'
            ]
        elif (self.target_os == 'win32'):
            self.install_dirs = [
                p.join(os.environ['APPDATA'], 'Titanium'),
                # TODO: Is there a better way to determine this directory?
                'C:\\ProgramData\\Titanium',
                'C:\\Documents and Settings\All Users\Application Data\Titanium'
            ]
        else:
            raise Exception("Unknown environment!")

        # If we are in the build hierarchy, try to find runtimes and modules
        # relative to this file's location.
        build_subpath = p.join('build', self.target_os)
        self.components_dir = None
        if (p.exists(p.join(script_dir, '..', 'kroll')) and
                p.exists(p.join(script_dir, '..', 'build', self.target_os, 'runtime')) and
                p.exists(p.join(script_dir, '..', 'build', self.target_os, 'sdk'))):
            self.components_dir = p.join(script_dir, '..', 'build', self.target_os)
        elif p.exists(p.join(script_dir, '..', 'runtime')) and p.exists(p.join(script_dir, '..', 'sdk')):
            self.components_dir = p.join(script_dir, '..')

    def create_app(self, path):
        if self.target_os == 'linux':
            return linux_app.LinuxApp(self, path)
        if self.target_os == 'osx':
            return osx_app.OSXApp(self, path)
        if self.target_os == 'win32':
            return win32_app.Win32App(self, path)

    def log(self, text):
        print u' -> %s' % text
        sys.stdout.flush()

    def get_excludes(self):
        return self.excludes

    def get_component(self, type, name, version):
        # First try the build directory.
        if self.components_dir:
            target = p.join(self.components_dir, type)
            if name:  # Modules have names
                target = p.join(target, name)
            if p.exists(target):
                return target

        # Next try searching list of installed directories
        for dir in self.install_dirs:
            target = p.join(dir, type, self.target_os)
            if name:
                target = p.join(target, name)
            target = p.join(target, version)
            if p.exists(target):
                return target

        return None

    def get_sdk_dir(self, version):
        c = self.get_component('sdk', None, version)
        if not c:
            raise Exception(u'Could not find SDK version %s' % version)
        return c

    def get_runtime_dir(self, version):
        c = self.get_component('runtime', None, version)
        if not c:
            raise Exception(u'Could not find runtime version %s' % version)
        return c

    def get_module_dir(self, module):
        c = self.get_component('modules', module[0], module[1])
        if not c:
            raise Exception(u'Could not find module %s-%s' % module)
        return c

    def run(self, args):
        self.log(u'Launching: %s' % args)
        subprocess.call(args)

    def ignore_errors(self, function):
        try:
            function()
        except Exception, e:
            self.log("Ignoring error: %s" % e)
29.477273
99
0.684914
e3d54a415db08b2719beb6e2b5da5b0a3981872c
154,845
py
Python
ghapi/metadata.py
danpalmer/ghapi
69e3495f8b6cb3974c5643b79ec7eebfc905786d
[ "Apache-2.0" ]
null
null
null
ghapi/metadata.py
danpalmer/ghapi
69e3495f8b6cb3974c5643b79ec7eebfc905786d
[ "Apache-2.0" ]
1
2021-02-22T07:04:39.000Z
2021-02-22T12:58:57.000Z
ghapi/metadata.py
danpalmer/ghapi
69e3495f8b6cb3974c5643b79ec7eebfc905786d
[ "Apache-2.0" ]
null
null
null
funcs = [('/app', 'get', 'apps/get-authenticated', 'Get the authenticated app', 'v3/apps/#get-the-authenticated-app', [], [], ''), ('/app-manifests/{code}/conversions', 'post', 'apps/create-from-manifest', 'Create a GitHub App from a manifest', 'v3/apps/#create-a-github-app-from-a-manifest', [], [], ''), ('/app/hook/config', 'get', 'apps/get-webhook-config-for-app', 'Get a webhook configuration for an app', 'v3/apps#get-a-webhook-configuration-for-an-app', [], [], ''), ('/app/hook/config', 'patch', 'apps/update-webhook-config-for-app', 'Update a webhook configuration for an app', 'v3/apps#update-a-webhook-configuration-for-an-app', [], [['url', str], ['content_type', str], ['secret', str], ['insecure_ssl', str]], ''), ('/app/installations', 'get', 'apps/list-installations', 'List installations for the authenticated app', 'v3/apps/#list-installations-for-the-authenticated-app', ['per_page', 'page', 'since', 'outdated'], [], ''), ('/app/installations/{installation_id}', 'get', 'apps/get-installation', 'Get an installation for the authenticated app', 'v3/apps/#get-an-installation-for-the-authenticated-app', [], [], ''), ('/app/installations/{installation_id}', 'delete', 'apps/delete-installation', 'Delete an installation for the authenticated app', 'v3/apps/#delete-an-installation-for-the-authenticated-app', [], [], ''), ('/app/installations/{installation_id}/access_tokens', 'post', 'apps/create-installation-access-token', 'Create an installation access token for an app', 'v3/apps/#create-an-installation-access-token-for-an-app', [], [['repositories', list], ['repository_ids', list], ['permissions', dict]], ''), ('/app/installations/{installation_id}/suspended', 'put', 'apps/suspend-installation', 'Suspend an app installation', 'v3/apps/#suspend-an-app-installation', [], [], ''), ('/app/installations/{installation_id}/suspended', 'delete', 'apps/unsuspend-installation', 'Unsuspend an app installation', 'v3/apps/#unsuspend-an-app-installation', [], [], ''), ('/applications/grants', 'get', 'oauth-authorizations/list-grants', 'List your grants', 'rest/reference/oauth-authorizations#list-your-grants', ['per_page', 'page'], [], ''), ('/applications/grants/{grant_id}', 'get', 'oauth-authorizations/get-grant', 'Get a single grant', 'rest/reference/oauth-authorizations#get-a-single-grant', [], [], ''), ('/applications/grants/{grant_id}', 'delete', 'oauth-authorizations/delete-grant', 'Delete a grant', 'rest/reference/oauth-authorizations#delete-a-grant', [], [], ''), ('/applications/{client_id}/grant', 'delete', 'apps/delete-authorization', 'Delete an app authorization', 'rest/reference/apps#delete-an-app-authorization', [], [['access_token', str]], ''), ('/applications/{client_id}/grants/{access_token}', 'delete', 'apps/revoke-grant-for-application', 'Revoke a grant for an application', 'rest/reference/apps#revoke-a-grant-for-an-application', [], [], ''), ('/applications/{client_id}/token', 'post', 'apps/check-token', 'Check a token', 'rest/reference/apps#check-a-token', [], [['access_token', str]], ''), ('/applications/{client_id}/token', 'patch', 'apps/reset-token', 'Reset a token', 'rest/reference/apps#reset-a-token', [], [['access_token', str]], ''), ('/applications/{client_id}/token', 'delete', 'apps/delete-token', 'Delete an app token', 'rest/reference/apps#delete-an-app-token', [], [['access_token', str]], ''), ('/applications/{client_id}/tokens/{access_token}', 'get', 'apps/check-authorization', 'Check an authorization', 'rest/reference/apps#check-an-authorization', [], [], ''), 
('/applications/{client_id}/tokens/{access_token}', 'post', 'apps/reset-authorization', 'Reset an authorization', 'rest/reference/apps#reset-an-authorization', [], [], ''), ('/applications/{client_id}/tokens/{access_token}', 'delete', 'apps/revoke-authorization-for-application', 'Revoke an authorization for an application', 'rest/reference/apps#revoke-an-authorization-for-an-application', [], [], ''), ('/apps/{app_slug}', 'get', 'apps/get-by-slug', 'Get an app', 'v3/apps/#get-an-app', [], [], ''), ('/authorizations', 'get', 'oauth-authorizations/list-authorizations', 'List your authorizations', 'rest/reference/oauth-authorizations#list-your-authorizations', ['per_page', 'page'], [], ''), ('/authorizations', 'post', 'oauth-authorizations/create-authorization', 'Create a new authorization', 'rest/reference/oauth-authorizations#create-a-new-authorization', [], [['scopes', list], ['note', str], ['note_url', str], ['client_id', str], ['client_secret', str], ['fingerprint', str]], ''), ('/authorizations/clients/{client_id}', 'put', 'oauth-authorizations/get-or-create-authorization-for-app', 'Get-or-create an authorization for a specific app', 'rest/reference/oauth-authorizations#get-or-create-an-authorization-for-a-specific-app', [], [['client_secret', str], ['scopes', list], ['note', str], ['note_url', str], ['fingerprint', str]], ''), ('/authorizations/clients/{client_id}/{fingerprint}', 'put', 'oauth-authorizations/get-or-create-authorization-for-app-and-fingerprint', 'Get-or-create an authorization for a specific app and fingerprint', 'rest/reference/oauth-authorizations#get-or-create-an-authorization-for-a-specific-app-and-fingerprint', [], [['client_secret', str], ['scopes', list], ['note', str], ['note_url', str]], ''), ('/authorizations/{authorization_id}', 'get', 'oauth-authorizations/get-authorization', 'Get a single authorization', 'rest/reference/oauth-authorizations#get-a-single-authorization', [], [], ''), ('/authorizations/{authorization_id}', 'patch', 'oauth-authorizations/update-authorization', 'Update an existing authorization', 'rest/reference/oauth-authorizations#update-an-existing-authorization', [], [['scopes', list], ['add_scopes', list], ['remove_scopes', list], ['note', str], ['note_url', str], ['fingerprint', str]], ''), ('/authorizations/{authorization_id}', 'delete', 'oauth-authorizations/delete-authorization', 'Delete an authorization', 'rest/reference/oauth-authorizations#delete-an-authorization', [], [], ''), ('/codes_of_conduct', 'get', 'codes-of-conduct/get-all-codes-of-conduct', 'Get all codes of conduct', 'v3/codes_of_conduct/#get-all-codes-of-conduct', [], [], 'scarlet-witch'), ('/codes_of_conduct/{key}', 'get', 'codes-of-conduct/get-conduct-code', 'Get a code of conduct', 'v3/codes_of_conduct/#get-a-code-of-conduct', [], [], 'scarlet-witch'), ('/content_references/{content_reference_id}/attachments', 'post', 'apps/create-content-attachment', 'Create a content attachment', 'rest/reference/apps#create-a-content-attachment', [], [['title', str], ['body', str]], 'corsair'), ('/emojis', 'get', 'emojis/get', 'Get emojis', 'v3/emojis/#get-emojis', [], [], ''), ('/enterprises/{enterprise}/actions/permissions', 'get', 'enterprise-admin/get-github-actions-permissions-enterprise', 'Get GitHub Actions permissions for an enterprise', 'rest/reference/enterprise-admin#get-github-actions-permissions-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/permissions', 'put', 'enterprise-admin/set-github-actions-permissions-enterprise', 'Set GitHub Actions 
permissions for an enterprise', 'rest/reference/enterprise-admin#set-github-actions-permissions-for-an-enterprise', [], [['enabled_organizations', str], ['allowed_actions', str]], ''), ('/enterprises/{enterprise}/actions/permissions/organizations', 'get', 'enterprise-admin/list-selected-organizations-enabled-github-actions-enterprise', 'List selected organizations enabled for GitHub Actions in an enterprise', 'rest/reference/enterprise-admin#list-selected-organizations-enabled-for-github-actions-in-an-enterprise', ['per_page', 'page'], [], ''), ('/enterprises/{enterprise}/actions/permissions/organizations', 'put', 'enterprise-admin/set-selected-organizations-enabled-github-actions-enterprise', 'Set selected organizations enabled for GitHub Actions in an enterprise', 'rest/reference/enterprise-admin#set-selected-organizations-enabled-for-github-actions-in-an-enterprise', [], [['selected_organization_ids', list]], ''), ('/enterprises/{enterprise}/actions/permissions/organizations/{org_id}', 'put', 'enterprise-admin/enable-selected-organization-github-actions-enterprise', 'Enable a selected organization for GitHub Actions in an enterprise', 'rest/reference/enterprise-admin#enable-a-selected-organization-for-github-actions-in-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/permissions/organizations/{org_id}', 'delete', 'enterprise-admin/disable-selected-organization-github-actions-enterprise', 'Disable a selected organization for GitHub Actions in an enterprise', 'rest/reference/enterprise-admin#disable-a-selected-organization-for-github-actions-in-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/permissions/selected-actions', 'get', 'enterprise-admin/get-allowed-actions-enterprise', 'Get allowed actions for an enterprise', 'rest/reference/enterprise-admin#get-allowed-actions-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/permissions/selected-actions', 'put', 'enterprise-admin/set-allowed-actions-enterprise', 'Set allowed actions for an enterprise', 'rest/reference/enterprise-admin#set-allowed-actions-for-an-enterprise', [], [['github_owned_allowed', bool], ['verified_allowed', bool], ['patterns_allowed', list]], ''), ('/enterprises/{enterprise}/actions/runner-groups', 'get', 'enterprise-admin/list-self-hosted-runner-groups-for-enterprise', 'List self-hosted runner groups for an enterprise', 'rest/reference/enterprise-admin#list-self-hosted-runner-groups-for-an-enterprise', ['per_page', 'page'], [], ''), ('/enterprises/{enterprise}/actions/runner-groups', 'post', 'enterprise-admin/create-self-hosted-runner-group-for-enterprise', 'Create a self-hosted runner group for an enterprise', 'rest/reference/enterprise-admin#create-self-hosted-runner-group-for-an-enterprise', [], [['name', str], ['visibility', str], ['selected_organization_ids', list], ['runners', list]], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}', 'get', 'enterprise-admin/get-self-hosted-runner-group-for-enterprise', 'Get a self-hosted runner group for an enterprise', 'rest/reference/enterprise-admin#get-a-self-hosted-runner-group-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}', 'patch', 'enterprise-admin/update-self-hosted-runner-group-for-enterprise', 'Update a self-hosted runner group for an enterprise', 'rest/reference/enterprise-admin#update-a-self-hosted-runner-group-for-an-enterprise', [], [['name', str], ['visibility', str, 'all']], ''), 
('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}', 'delete', 'enterprise-admin/delete-self-hosted-runner-group-from-enterprise', 'Delete a self-hosted runner group from an enterprise', 'rest/reference/enterprise-admin#delete-a-self-hosted-runner-group-from-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/organizations', 'get', 'enterprise-admin/list-org-access-to-self-hosted-runner-group-in-enterprise', 'List organization access to a self-hosted runner group in an enterprise', 'rest/reference/enterprise-admin#list-organization-access-to-a-self-hosted-runner-group-in-a-enterprise', ['per_page', 'page'], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/organizations', 'put', 'enterprise-admin/set-org-access-to-self-hosted-runner-group-in-enterprise', 'Set organization access for a self-hosted runner group in an enterprise', 'rest/reference/enterprise-admin#set-organization-access-to-a-self-hosted-runner-group-in-an-enterprise', [], [['selected_organization_ids', list]], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/organizations/{org_id}', 'put', 'enterprise-admin/add-org-access-to-self-hosted-runner-group-in-enterprise', 'Add organization access to a self-hosted runner group in an enterprise', 'rest/reference/enterprise-admin#add-organization-access-to-a-self-hosted-runner-group-in-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/organizations/{org_id}', 'delete', 'enterprise-admin/remove-org-access-to-self-hosted-runner-group-in-enterprise', 'Remove organization access to a self-hosted runner group in an enterprise', 'rest/reference/enterprise-admin#remove-organization-access-to-a-self-hosted-runner-group-in-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/runners', 'get', 'enterprise-admin/list-self-hosted-runners-in-group-for-enterprise', 'List self-hosted runners in a group for an enterprise', 'rest/reference/enterprise-admin#list-self-hosted-runners-in-a-group-for-an-enterprise', ['per_page', 'page'], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/runners', 'put', 'enterprise-admin/set-self-hosted-runners-in-group-for-enterprise', 'Set self-hosted runners in a group for an enterprise', 'rest/reference/enterprise-admin#set-self-hosted-runners-in-a-group-for-an-enterprise', [], [['runners', list]], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/runners/{runner_id}', 'put', 'enterprise-admin/add-self-hosted-runner-to-group-for-enterprise', 'Add a self-hosted runner to a group for an enterprise', 'rest/reference/enterprise-admin#add-a-self-hosted-runner-to-a-group-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runner-groups/{runner_group_id}/runners/{runner_id}', 'delete', 'enterprise-admin/remove-self-hosted-runner-from-group-for-enterprise', 'Remove a self-hosted runner from a group for an enterprise', 'rest/reference/enterprise-admin#remove-a-self-hosted-runner-from-a-group-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runners', 'get', 'enterprise-admin/list-self-hosted-runners-for-enterprise', 'List self-hosted runners for an enterprise', 'rest/reference/enterprise-admin#list-self-hosted-runners-for-an-enterprise', ['per_page', 'page'], [], ''), ('/enterprises/{enterprise}/actions/runners/downloads', 'get', 'enterprise-admin/list-runner-applications-for-enterprise', 'List 
runner applications for an enterprise', 'rest/reference/enterprise-admin#list-runner-applications-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runners/registration-token', 'post', 'enterprise-admin/create-registration-token-for-enterprise', 'Create a registration token for an enterprise', 'rest/reference/enterprise-admin#create-a-registration-token-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runners/remove-token', 'post', 'enterprise-admin/create-remove-token-for-enterprise', 'Create a remove token for an enterprise', 'rest/reference/enterprise-admin#create-a-remove-token-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runners/{runner_id}', 'get', 'enterprise-admin/get-self-hosted-runner-for-enterprise', 'Get a self-hosted runner for an enterprise', 'rest/reference/enterprise-admin#get-a-self-hosted-runner-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/actions/runners/{runner_id}', 'delete', 'enterprise-admin/delete-self-hosted-runner-from-enterprise', 'Delete a self-hosted runner from an enterprise', 'rest/reference/enterprise-admin#delete-self-hosted-runner-from-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/settings/billing/actions', 'get', 'billing/get-github-actions-billing-ghe', 'Get GitHub Actions billing for an enterprise', 'v3/billing/#get-github-actions-billing-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/settings/billing/packages', 'get', 'billing/get-github-packages-billing-ghe', 'Get GitHub Packages billing for an enterprise', 'v3/billing/#get-github-packages-billing-for-an-enterprise', [], [], ''), ('/enterprises/{enterprise}/settings/billing/shared-storage', 'get', 'billing/get-shared-storage-billing-ghe', 'Get shared storage billing for an enterprise', 'v3/billing/#get-shared-storage-billing-for-an-enterprise', [], [], ''), ('/events', 'get', 'activity/list-public-events', 'List public events', 'rest/reference/activity#list-public-events', ['per_page', 'page'], [], ''), ('/feeds', 'get', 'activity/get-feeds', 'Get feeds', 'rest/reference/activity#get-feeds', [], [], ''), ('/gists', 'get', 'gists/list', 'List gists for the authenticated user', 'v3/gists/#list-gists-for-the-authenticated-user', ['since', 'per_page', 'page'], [], ''), ('/gists', 'post', 'gists/create', 'Create a gist', 'v3/gists/#create-a-gist', [], [['description', str], ['files', dict], ['public', object]], ''), ('/gists/public', 'get', 'gists/list-public', 'List public gists', 'v3/gists/#list-public-gists', ['since', 'per_page', 'page'], [], ''), ('/gists/starred', 'get', 'gists/list-starred', 'List starred gists', 'v3/gists/#list-starred-gists', ['since', 'per_page', 'page'], [], ''), ('/gists/{gist_id}', 'get', 'gists/get', 'Get a gist', 'v3/gists/#get-a-gist', [], [], ''), ('/gists/{gist_id}', 'patch', 'gists/update', 'Update a gist', 'v3/gists/#update-a-gist', [], [['description', str], ['files', dict]], ''), ('/gists/{gist_id}', 'delete', 'gists/delete', 'Delete a gist', 'v3/gists/#delete-a-gist', [], [], ''), ('/gists/{gist_id}/comments', 'get', 'gists/list-comments', 'List gist comments', 'rest/reference/gists#list-gist-comments', ['per_page', 'page'], [], ''), ('/gists/{gist_id}/comments', 'post', 'gists/create-comment', 'Create a gist comment', 'rest/reference/gists#create-a-gist-comment', [], [['body', str]], ''), ('/gists/{gist_id}/comments/{comment_id}', 'get', 'gists/get-comment', 'Get a gist comment', 'rest/reference/gists#get-a-gist-comment', [], [], ''), 
('/gists/{gist_id}/comments/{comment_id}', 'patch', 'gists/update-comment', 'Update a gist comment', 'rest/reference/gists#update-a-gist-comment', [], [['body', str]], ''), ('/gists/{gist_id}/comments/{comment_id}', 'delete', 'gists/delete-comment', 'Delete a gist comment', 'rest/reference/gists#delete-a-gist-comment', [], [], ''), ('/gists/{gist_id}/commits', 'get', 'gists/list-commits', 'List gist commits', 'v3/gists/#list-gist-commits', ['per_page', 'page'], [], ''), ('/gists/{gist_id}/forks', 'get', 'gists/list-forks', 'List gist forks', 'v3/gists/#list-gist-forks', ['per_page', 'page'], [], ''), ('/gists/{gist_id}/forks', 'post', 'gists/fork', 'Fork a gist', 'v3/gists/#fork-a-gist', [], [], ''), ('/gists/{gist_id}/star', 'get', 'gists/check-is-starred', 'Check if a gist is starred', 'v3/gists/#check-if-a-gist-is-starred', [], [], ''), ('/gists/{gist_id}/star', 'put', 'gists/star', 'Star a gist', 'v3/gists/#star-a-gist', [], [], ''), ('/gists/{gist_id}/star', 'delete', 'gists/unstar', 'Unstar a gist', 'v3/gists/#unstar-a-gist', [], [], ''), ('/gists/{gist_id}/{sha}', 'get', 'gists/get-revision', 'Get a gist revision', 'v3/gists/#get-a-gist-revision', [], [], ''), ('/gitignore/templates', 'get', 'gitignore/get-all-templates', 'Get all gitignore templates', 'v3/gitignore/#get-all-gitignore-templates', [], [], ''), ('/gitignore/templates/{name}', 'get', 'gitignore/get-template', 'Get a gitignore template', 'v3/gitignore/#get-a-gitignore-template', [], [], ''), ('/installation/repositories', 'get', 'apps/list-repos-accessible-to-installation', 'List repositories accessible to the app installation', 'rest/reference/apps#list-repositories-accessible-to-the-app-installation', ['per_page', 'page'], [], 'mercy'), ('/installation/token', 'delete', 'apps/revoke-installation-access-token', 'Revoke an installation access token', 'rest/reference/apps#revoke-an-installation-access-token', [], [], ''), ('/issues', 'get', 'issues/list', 'List issues assigned to the authenticated user', 'v3/issues/#list-issues-assigned-to-the-authenticated-user', ['filter', 'state', 'labels', 'sort', 'direction', 'since', 'collab', 'orgs', 'owned', 'pulls', 'per_page', 'page'], [], 'squirrel-girl'), ('/licenses', 'get', 'licenses/get-all-commonly-used', 'Get all commonly used licenses', 'v3/licenses/#get-all-commonly-used-licenses', ['featured', 'per_page'], [], ''), ('/licenses/{license}', 'get', 'licenses/get', 'Get a license', 'v3/licenses/#get-a-license', [], [], ''), ('/markdown', 'post', 'markdown/render', 'Render a Markdown document', 'v3/markdown/#render-a-markdown-document', [], [['text', str], ['mode', str, 'markdown'], ['context', str]], ''), ('/markdown/raw', 'post', 'markdown/render-raw', 'Render a Markdown document in raw mode', 'v3/markdown/#render-a-markdown-document-in-raw-mode', [], [], ''), ('/marketplace_listing/accounts/{account_id}', 'get', 'apps/get-subscription-plan-for-account', 'Get a subscription plan for an account', 'rest/reference/apps#get-a-subscription-plan-for-an-account', [], [], ''), ('/marketplace_listing/plans', 'get', 'apps/list-plans', 'List plans', 'rest/reference/apps#list-plans', ['per_page', 'page'], [], ''), ('/marketplace_listing/plans/{plan_id}/accounts', 'get', 'apps/list-accounts-for-plan', 'List accounts for a plan', 'rest/reference/apps#list-accounts-for-a-plan', ['sort', 'direction', 'per_page', 'page'], [], ''), ('/marketplace_listing/stubbed/accounts/{account_id}', 'get', 'apps/get-subscription-plan-for-account-stubbed', 'Get a subscription plan for an account 
(stubbed)', 'rest/reference/apps#get-a-subscription-plan-for-an-account-stubbed', [], [], ''), ('/marketplace_listing/stubbed/plans', 'get', 'apps/list-plans-stubbed', 'List plans (stubbed)', 'rest/reference/apps#list-plans-stubbed', ['per_page', 'page'], [], ''), ('/marketplace_listing/stubbed/plans/{plan_id}/accounts', 'get', 'apps/list-accounts-for-plan-stubbed', 'List accounts for a plan (stubbed)', 'rest/reference/apps#list-accounts-for-a-plan-stubbed', ['sort', 'direction', 'per_page', 'page'], [], ''), ('/meta', 'get', 'meta/get', 'Get GitHub meta information', 'v3/meta/#get-github-meta-information', [], [], ''), ('/networks/{owner}/{repo}/events', 'get', 'activity/list-public-events-for-repo-network', 'List public events for a network of repositories', 'rest/reference/activity#list-public-events-for-a-network-of-repositories', ['per_page', 'page'], [], ''), ('/notifications', 'get', 'activity/list-notifications-for-authenticated-user', 'List notifications for the authenticated user', 'rest/reference/activity#list-notifications-for-the-authenticated-user', ['all', 'participating', 'since', 'before', 'per_page', 'page'], [], ''), ('/notifications', 'put', 'activity/mark-notifications-as-read', 'Mark notifications as read', 'rest/reference/activity#mark-notifications-as-read', [], [['last_read_at', str], ['read', bool]], ''), ('/notifications/threads/{thread_id}', 'get', 'activity/get-thread', 'Get a thread', 'rest/reference/activity#get-a-thread', [], [], ''), ('/notifications/threads/{thread_id}', 'patch', 'activity/mark-thread-as-read', 'Mark a thread as read', 'rest/reference/activity#mark-a-thread-as-read', [], [], ''), ('/notifications/threads/{thread_id}/subscription', 'get', 'activity/get-thread-subscription-for-authenticated-user', 'Get a thread subscription for the authenticated user', 'rest/reference/activity#get-a-thread-subscription-for-the-authenticated-user', [], [], ''), ('/notifications/threads/{thread_id}/subscription', 'put', 'activity/set-thread-subscription', 'Set a thread subscription', 'rest/reference/activity#set-a-thread-subscription', [], [['ignored', bool, False]], ''), ('/notifications/threads/{thread_id}/subscription', 'delete', 'activity/delete-thread-subscription', 'Delete a thread subscription', 'rest/reference/activity#delete-a-thread-subscription', [], [], ''), ('/organizations', 'get', 'orgs/list', 'List organizations', 'v3/orgs/#list-organizations', ['since', 'per_page'], [], ''), ('/orgs/{org}', 'get', 'orgs/get', 'Get an organization', 'v3/orgs/#get-an-organization', [], [], 'surtur'), ('/orgs/{org}', 'patch', 'orgs/update', 'Update an organization', 'v3/orgs/#update-an-organization', [], [['billing_email', str], ['company', str], ['email', str], ['twitter_username', str], ['location', str], ['name', str], ['description', str], ['has_organization_projects', bool], ['has_repository_projects', bool], ['default_repository_permission', str, 'read'], ['members_can_create_repositories', bool, True], ['members_can_create_internal_repositories', bool], ['members_can_create_private_repositories', bool], ['members_can_create_public_repositories', bool], ['members_allowed_repository_creation_type', str], ['members_can_create_pages', bool], ['blog', str]], 'surtur'), ('/orgs/{org}/actions/permissions', 'get', 'actions/get-github-actions-permissions-organization', 'Get GitHub Actions permissions for an organization', 'rest/reference/actions#get-github-actions-permissions-for-an-organization', [], [], ''), ('/orgs/{org}/actions/permissions', 'put', 
'actions/set-github-actions-permissions-organization', 'Set GitHub Actions permissions for an organization', 'rest/reference/actions#set-github-actions-permissions-for-an-organization', [], [['enabled_repositories', str], ['allowed_actions', str]], ''), ('/orgs/{org}/actions/permissions/repositories', 'get', 'actions/list-selected-repositories-enabled-github-actions-organization', 'List selected repositories enabled for GitHub Actions in an organization', 'rest/reference/actions#list-selected-repositories-enabled-for-github-actions-in-an-organization', ['per_page', 'page'], [], ''), ('/orgs/{org}/actions/permissions/repositories', 'put', 'actions/set-selected-repositories-enabled-github-actions-organization', 'Set selected repositories enabled for GitHub Actions in an organization', 'rest/reference/actions#set-selected-repositories-enabled-for-github-actions-in-an-organization', [], [['selected_repository_ids', list]], ''), ('/orgs/{org}/actions/permissions/repositories/{repository_id}', 'put', 'actions/enable-selected-repository-github-actions-organization', 'Enable a selected repository for GitHub Actions in an organization', 'rest/reference/actions#enable-a-selected-repository-for-github-actions-in-an-organization', [], [], ''), ('/orgs/{org}/actions/permissions/repositories/{repository_id}', 'delete', 'actions/disable-selected-repository-github-actions-organization', 'Disable a selected repository for GitHub Actions in an organization', 'rest/reference/actions#disable-a-selected-repository-for-github-actions-in-an-organization', [], [], ''), ('/orgs/{org}/actions/permissions/selected-actions', 'get', 'actions/get-allowed-actions-organization', 'Get allowed actions for an organization', 'rest/reference/actions#get-allowed-actions-for-an-organization', [], [], ''), ('/orgs/{org}/actions/permissions/selected-actions', 'put', 'actions/set-allowed-actions-organization', 'Set allowed actions for an organization', 'rest/reference/actions#set-allowed-actions-for-an-organization', [], [['github_owned_allowed', bool], ['verified_allowed', bool], ['patterns_allowed', list]], ''), ('/orgs/{org}/actions/runner-groups', 'get', 'actions/list-self-hosted-runner-groups-for-org', 'List self-hosted runner groups for an organization', 'rest/reference/actions#list-self-hosted-runner-groups-for-an-organization', ['per_page', 'page'], [], ''), ('/orgs/{org}/actions/runner-groups', 'post', 'actions/create-self-hosted-runner-group-for-org', 'Create a self-hosted runner group for an organization', 'rest/reference/actions#create-a-self-hosted-runner-group-for-an-organization', [], [['name', str], ['visibility', str, 'all'], ['selected_repository_ids', list], ['runners', list]], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}', 'get', 'actions/get-self-hosted-runner-group-for-org', 'Get a self-hosted runner group for an organization', 'rest/reference/actions#get-a-self-hosted-runner-group-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}', 'patch', 'actions/update-self-hosted-runner-group-for-org', 'Update a self-hosted runner group for an organization', 'rest/reference/actions#update-a-self-hosted-runner-group-for-an-organization', [], [['name', str], ['visibility', str]], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}', 'delete', 'actions/delete-self-hosted-runner-group-from-org', 'Delete a self-hosted runner group from an organization', 'rest/reference/actions#delete-a-self-hosted-runner-group-from-an-organization', [], [], ''), 
('/orgs/{org}/actions/runner-groups/{runner_group_id}/repositories', 'get', 'actions/list-repo-access-to-self-hosted-runner-group-in-org', 'List repository access to a self-hosted runner group in an organization', 'rest/reference/actions#list-repository-access-to-a-self-hosted-runner-group-in-an-organization', [], [], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/repositories', 'put', 'actions/set-repo-access-to-self-hosted-runner-group-in-org', 'Set repository access for a self-hosted runner group in an organization', 'rest/reference/actions#set-repository-access-to-a-self-hosted-runner-group-in-an-organization', [], [['selected_repository_ids', list]], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/repositories/{repository_id}', 'put', 'actions/add-repo-access-to-self-hosted-runner-group-in-org', 'Add repository access to a self-hosted runner group in an organization', 'rest/reference/actions#add-repository-acess-to-a-self-hosted-runner-group-in-an-organization', [], [], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/repositories/{repository_id}', 'delete', 'actions/remove-repo-access-to-self-hosted-runner-group-in-org', 'Remove repository access to a self-hosted runner group in an organization', 'rest/reference/actions#remove-repository-access-to-a-self-hosted-runner-group-in-an-organization', [], [], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/runners', 'get', 'actions/list-self-hosted-runners-in-group-for-org', 'List self-hosted runners in a group for an organization', 'rest/reference/actions#list-self-hosted-runners-in-a-group-for-an-organization', ['per_page', 'page'], [], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/runners', 'put', 'actions/set-self-hosted-runners-in-group-for-org', 'Set self-hosted runners in a group for an organization', 'rest/reference/actions#set-self-hosted-runners-in-a-group-for-an-organization', [], [['runners', list]], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/runners/{runner_id}', 'put', 'actions/add-self-hosted-runner-to-group-for-org', 'Add a self-hosted runner to a group for an organization', 'rest/reference/actions#add-a-self-hosted-runner-to-a-group-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runner-groups/{runner_group_id}/runners/{runner_id}', 'delete', 'actions/remove-self-hosted-runner-from-group-for-org', 'Remove a self-hosted runner from a group for an organization', 'rest/reference/actions#remove-a-self-hosted-runner-from-a-group-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runners', 'get', 'actions/list-self-hosted-runners-for-org', 'List self-hosted runners for an organization', 'rest/reference/actions#list-self-hosted-runners-for-an-organization', ['per_page', 'page'], [], ''), ('/orgs/{org}/actions/runners/downloads', 'get', 'actions/list-runner-applications-for-org', 'List runner applications for an organization', 'rest/reference/actions#list-runner-applications-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runners/registration-token', 'post', 'actions/create-registration-token-for-org', 'Create a registration token for an organization', 'rest/reference/actions#create-a-registration-token-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runners/remove-token', 'post', 'actions/create-remove-token-for-org', 'Create a remove token for an organization', 'rest/reference/actions#create-a-remove-token-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runners/{runner_id}', 'get', 
'actions/get-self-hosted-runner-for-org', 'Get a self-hosted runner for an organization', 'rest/reference/actions#get-a-self-hosted-runner-for-an-organization', [], [], ''), ('/orgs/{org}/actions/runners/{runner_id}', 'delete', 'actions/delete-self-hosted-runner-from-org', 'Delete a self-hosted runner from an organization', 'rest/reference/actions#delete-a-self-hosted-runner-from-an-organization', [], [], ''), ('/orgs/{org}/actions/secrets', 'get', 'actions/list-org-secrets', 'List organization secrets', 'rest/reference/actions#list-organization-secrets', ['per_page', 'page'], [], ''), ('/orgs/{org}/actions/secrets/public-key', 'get', 'actions/get-org-public-key', 'Get an organization public key', 'rest/reference/actions#get-an-organization-public-key', [], [], ''), ('/orgs/{org}/actions/secrets/{secret_name}', 'get', 'actions/get-org-secret', 'Get an organization secret', 'rest/reference/actions#get-an-organization-secret', [], [], ''), ('/orgs/{org}/actions/secrets/{secret_name}', 'put', 'actions/create-or-update-org-secret', 'Create or update an organization secret', 'rest/reference/actions#create-or-update-an-organization-secret', [], [['encrypted_value', str], ['key_id', str], ['visibility', str], ['selected_repository_ids', list]], ''), ('/orgs/{org}/actions/secrets/{secret_name}', 'delete', 'actions/delete-org-secret', 'Delete an organization secret', 'rest/reference/actions#delete-an-organization-secret', [], [], ''), ('/orgs/{org}/actions/secrets/{secret_name}/repositories', 'get', 'actions/list-selected-repos-for-org-secret', 'List selected repositories for an organization secret', 'rest/reference/actions#list-selected-repositories-for-an-organization-secret', [], [], ''), ('/orgs/{org}/actions/secrets/{secret_name}/repositories', 'put', 'actions/set-selected-repos-for-org-secret', 'Set selected repositories for an organization secret', 'rest/reference/actions#set-selected-repositories-for-an-organization-secret', [], [['selected_repository_ids', list]], ''), ('/orgs/{org}/actions/secrets/{secret_name}/repositories/{repository_id}', 'put', 'actions/add-selected-repo-to-org-secret', 'Add selected repository to an organization secret', 'rest/reference/actions#add-selected-repository-to-an-organization-secret', [], [], ''), ('/orgs/{org}/actions/secrets/{secret_name}/repositories/{repository_id}', 'delete', 'actions/remove-selected-repo-from-org-secret', 'Remove selected repository from an organization secret', 'rest/reference/actions#remove-selected-repository-from-an-organization-secret', [], [], ''), ('/orgs/{org}/blocks', 'get', 'orgs/list-blocked-users', 'List users blocked by an organization', 'rest/reference/orgs#list-users-blocked-by-an-organization', [], [], ''), ('/orgs/{org}/blocks/{username}', 'get', 'orgs/check-blocked-user', 'Check if a user is blocked by an organization', 'rest/reference/orgs#check-if-a-user-is-blocked-by-an-organization', [], [], ''), ('/orgs/{org}/blocks/{username}', 'put', 'orgs/block-user', 'Block a user from an organization', 'rest/reference/orgs#block-a-user-from-an-organization', [], [], ''), ('/orgs/{org}/blocks/{username}', 'delete', 'orgs/unblock-user', 'Unblock a user from an organization', 'rest/reference/orgs#unblock-a-user-from-an-organization', [], [], ''), ('/orgs/{org}/credential-authorizations', 'get', 'orgs/list-saml-sso-authorizations', 'List SAML SSO authorizations for an organization', 'v3/orgs/#list-saml-sso-authorizations-for-an-organization', [], [], ''), ('/orgs/{org}/credential-authorizations/{credential_id}', 'delete', 
'orgs/remove-saml-sso-authorization', 'Remove a SAML SSO authorization for an organization', 'v3/orgs/#remove-a-saml-sso-authorization-for-an-organization', [], [], ''), ('/orgs/{org}/events', 'get', 'activity/list-public-org-events', 'List public organization events', 'rest/reference/activity#list-public-organization-events', ['per_page', 'page'], [], ''), ('/orgs/{org}/hooks', 'get', 'orgs/list-webhooks', 'List organization webhooks', 'rest/reference/orgs#list-organization-webhooks', ['per_page', 'page'], [], ''), ('/orgs/{org}/hooks', 'post', 'orgs/create-webhook', 'Create an organization webhook', 'rest/reference/orgs#create-an-organization-webhook', [], [['name', str], ['config', dict], ['events', list, ['push']], ['active', bool, True]], ''), ('/orgs/{org}/hooks/{hook_id}', 'get', 'orgs/get-webhook', 'Get an organization webhook', 'rest/reference/orgs#get-an-organization-webhook', [], [], ''), ('/orgs/{org}/hooks/{hook_id}', 'patch', 'orgs/update-webhook', 'Update an organization webhook', 'rest/reference/orgs#update-an-organization-webhook', [], [['config', dict], ['events', list, ['push']], ['active', bool, True], ['name', str]], ''), ('/orgs/{org}/hooks/{hook_id}', 'delete', 'orgs/delete-webhook', 'Delete an organization webhook', 'rest/reference/orgs#delete-an-organization-webhook', [], [], ''), ('/orgs/{org}/hooks/{hook_id}/config', 'get', 'orgs/get-webhook-config-for-org', 'Get a webhook configuration for an organization', 'v3/orgs#get-a-webhook-configuration-for-an-organization', [], [], ''), ('/orgs/{org}/hooks/{hook_id}/config', 'patch', 'orgs/update-webhook-config-for-org', 'Update a webhook configuration for an organization', 'v3/orgs#update-a-webhook-configuration-for-an-organization', [], [['url', str], ['content_type', str], ['secret', str], ['insecure_ssl', str]], ''), ('/orgs/{org}/hooks/{hook_id}/pings', 'post', 'orgs/ping-webhook', 'Ping an organization webhook', 'rest/reference/orgs#ping-an-organization-webhook', [], [], ''), ('/orgs/{org}/installation', 'get', 'apps/get-org-installation', 'Get an organization installation for the authenticated app', 'v3/apps/#get-an-organization-installation-for-the-authenticated-app', [], [], ''), ('/orgs/{org}/installations', 'get', 'orgs/list-app-installations', 'List app installations for an organization', 'v3/orgs/#list-app-installations-for-an-organization', ['per_page', 'page'], [], ''), ('/orgs/{org}/interaction-limits', 'get', 'interactions/get-restrictions-for-org', 'Get interaction restrictions for an organization', 'rest/reference/interactions#get-interaction-restrictions-for-an-organization', [], [], ''), ('/orgs/{org}/interaction-limits', 'put', 'interactions/set-restrictions-for-org', 'Set interaction restrictions for an organization', 'rest/reference/interactions#set-interaction-restrictions-for-an-organization', [], [['limit', str], ['expiry', str]], ''), ('/orgs/{org}/interaction-limits', 'delete', 'interactions/remove-restrictions-for-org', 'Remove interaction restrictions for an organization', 'rest/reference/interactions#remove-interaction-restrictions-for-an-organization', [], [], ''), ('/orgs/{org}/invitations', 'get', 'orgs/list-pending-invitations', 'List pending organization invitations', 'rest/reference/orgs#list-pending-organization-invitations', ['per_page', 'page'], [], ''), ('/orgs/{org}/invitations', 'post', 'orgs/create-invitation', 'Create an organization invitation', 'rest/reference/orgs#create-an-organization-invitation', [], [['invitee_id', int], ['email', str], ['role', str, 'direct_member'], 
['team_ids', list]], ''), ('/orgs/{org}/invitations/{invitation_id}/teams', 'get', 'orgs/list-invitation-teams', 'List organization invitation teams', 'rest/reference/orgs#list-organization-invitation-teams', ['per_page', 'page'], [], ''), ('/orgs/{org}/issues', 'get', 'issues/list-for-org', 'List organization issues assigned to the authenticated user', 'v3/issues/#list-organization-issues-assigned-to-the-authenticated-user', ['filter', 'state', 'labels', 'sort', 'direction', 'since', 'per_page', 'page'], [], 'squirrel-girl'), ('/orgs/{org}/members', 'get', 'orgs/list-members', 'List organization members', 'rest/reference/orgs#list-organization-members', ['filter', 'role', 'per_page', 'page'], [], ''), ('/orgs/{org}/members/{username}', 'get', 'orgs/check-membership-for-user', 'Check organization membership for a user', 'rest/reference/orgs#check-organization-membership-for-a-user', [], [], ''), ('/orgs/{org}/members/{username}', 'delete', 'orgs/remove-member', 'Remove an organization member', 'rest/reference/orgs#remove-an-organization-member', [], [], ''), ('/orgs/{org}/memberships/{username}', 'get', 'orgs/get-membership-for-user', 'Get organization membership for a user', 'rest/reference/orgs#get-organization-membership-for-a-user', [], [], ''), ('/orgs/{org}/memberships/{username}', 'put', 'orgs/set-membership-for-user', 'Set organization membership for a user', 'rest/reference/orgs#set-organization-membership-for-a-user', [], [['role', str, 'member']], ''), ('/orgs/{org}/memberships/{username}', 'delete', 'orgs/remove-membership-for-user', 'Remove organization membership for a user', 'rest/reference/orgs#remove-organization-membership-for-a-user', [], [], ''), ('/orgs/{org}/migrations', 'get', 'migrations/list-for-org', 'List organization migrations', 'rest/reference/migrations#list-organization-migrations', ['per_page', 'page'], [], 'wyandotte'), ('/orgs/{org}/migrations', 'post', 'migrations/start-for-org', 'Start an organization migration', 'rest/reference/migrations#start-an-organization-migration', [], [['repositories', list], ['lock_repositories', bool, False], ['exclude_attachments', bool, False], ['exclude', list]], ''), ('/orgs/{org}/migrations/{migration_id}', 'get', 'migrations/get-status-for-org', 'Get an organization migration status', 'rest/reference/migrations#get-an-organization-migration-status', [], [], 'wyandotte'), ('/orgs/{org}/migrations/{migration_id}/archive', 'get', 'migrations/download-archive-for-org', 'Download an organization migration archive', 'rest/reference/migrations#download-an-organization-migration-archive', [], [], 'wyandotte'), ('/orgs/{org}/migrations/{migration_id}/archive', 'delete', 'migrations/delete-archive-for-org', 'Delete an organization migration archive', 'rest/reference/migrations#delete-an-organization-migration-archive', [], [], 'wyandotte'), ('/orgs/{org}/migrations/{migration_id}/repos/{repo_name}/lock', 'delete', 'migrations/unlock-repo-for-org', 'Unlock an organization repository', 'rest/reference/migrations#unlock-an-organization-repository', [], [], 'wyandotte'), ('/orgs/{org}/migrations/{migration_id}/repositories', 'get', 'migrations/list-repos-for-org', 'List repositories in an organization migration', 'rest/reference/migrations#list-repositories-in-an-organization-migration', ['per_page', 'page'], [], 'wyandotte'), ('/orgs/{org}/outside_collaborators', 'get', 'orgs/list-outside-collaborators', 'List outside collaborators for an organization', 'rest/reference/orgs#list-outside-collaborators-for-an-organization', ['filter', 
'per_page', 'page'], [], ''), ('/orgs/{org}/outside_collaborators/{username}', 'put', 'orgs/convert-member-to-outside-collaborator', 'Convert an organization member to outside collaborator', 'rest/reference/orgs#convert-an-organization-member-to-outside-collaborator', [], [], ''), ('/orgs/{org}/outside_collaborators/{username}', 'delete', 'orgs/remove-outside-collaborator', 'Remove outside collaborator from an organization', 'rest/reference/orgs#remove-outside-collaborator-from-an-organization', [], [], ''), ('/orgs/{org}/projects', 'get', 'projects/list-for-org', 'List organization projects', 'v3/projects/#list-organization-projects', ['state', 'per_page', 'page'], [], 'inertia'), ('/orgs/{org}/projects', 'post', 'projects/create-for-org', 'Create an organization project', 'v3/projects/#create-an-organization-project', [], [['name', str], ['body', str]], 'inertia'), ('/orgs/{org}/public_members', 'get', 'orgs/list-public-members', 'List public organization members', 'rest/reference/orgs#list-public-organization-members', ['per_page', 'page'], [], ''), ('/orgs/{org}/public_members/{username}', 'get', 'orgs/check-public-membership-for-user', 'Check public organization membership for a user', 'rest/reference/orgs#check-public-organization-membership-for-a-user', [], [], ''), ('/orgs/{org}/public_members/{username}', 'put', 'orgs/set-public-membership-for-authenticated-user', 'Set public organization membership for the authenticated user', 'rest/reference/orgs#set-public-organization-membership-for-the-authenticated-user', [], [], ''), ('/orgs/{org}/public_members/{username}', 'delete', 'orgs/remove-public-membership-for-authenticated-user', 'Remove public organization membership for the authenticated user', 'rest/reference/orgs#remove-public-organization-membership-for-the-authenticated-user', [], [], ''), ('/orgs/{org}/repos', 'get', 'repos/list-for-org', 'List organization repositories', 'v3/repos/#list-organization-repositories', ['type', 'sort', 'direction', 'per_page', 'page'], [], 'nebula'), ('/orgs/{org}/repos', 'post', 'repos/create-in-org', 'Create an organization repository', 'v3/repos/#create-an-organization-repository', [], [['name', str], ['description', str], ['homepage', str], ['private', bool, False], ['visibility', str], ['has_issues', bool, True], ['has_projects', bool, True], ['has_wiki', bool, True], ['is_template', bool, False], ['team_id', int], ['auto_init', bool, False], ['gitignore_template', str], ['license_template', str], ['allow_squash_merge', bool, True], ['allow_merge_commit', bool, True], ['allow_rebase_merge', bool, True], ['delete_branch_on_merge', bool, False]], 'nebula'), ('/orgs/{org}/settings/billing/actions', 'get', 'billing/get-github-actions-billing-org', 'Get GitHub Actions billing for an organization', 'v3/billing/#get-github-actions-billing-for-an-organization', [], [], ''), ('/orgs/{org}/settings/billing/packages', 'get', 'billing/get-github-packages-billing-org', 'Get GitHub Packages billing for an organization', 'v3/billing/#get-github-packages-billing-for-an-organization', [], [], ''), ('/orgs/{org}/settings/billing/shared-storage', 'get', 'billing/get-shared-storage-billing-org', 'Get shared storage billing for an organization', 'v3/billing/#get-shared-storage-billing-for-an-organization', [], [], ''), ('/orgs/{org}/team-sync/groups', 'get', 'teams/list-idp-groups-for-org', 'List IdP groups for an organization', 'rest/reference/teams#list-idp-groups-for-an-organization', ['per_page', 'page'], [], ''), ('/orgs/{org}/teams', 'get', 'teams/list', 
'List teams', 'v3/teams/#list-teams', ['per_page', 'page'], [], ''), ('/orgs/{org}/teams', 'post', 'teams/create', 'Create a team', 'v3/teams/#create-a-team', [], [['name', str], ['description', str], ['maintainers', list], ['repo_names', list], ['privacy', str], ['permission', str, 'pull'], ['parent_team_id', int]], ''), ('/orgs/{org}/teams/{team_slug}', 'get', 'teams/get-by-name', 'Get a team by name', 'v3/teams/#get-a-team-by-name', [], [], ''), ('/orgs/{org}/teams/{team_slug}', 'patch', 'teams/update-in-org', 'Update a team', 'v3/teams/#update-a-team', [], [['name', str], ['description', str], ['privacy', str], ['permission', str, 'pull'], ['parent_team_id', int]], ''), ('/orgs/{org}/teams/{team_slug}', 'delete', 'teams/delete-in-org', 'Delete a team', 'v3/teams/#delete-a-team', [], [], ''), ('/orgs/{org}/teams/{team_slug}/discussions', 'get', 'teams/list-discussions-in-org', 'List discussions', 'rest/reference/teams#list-discussions', ['direction', 'per_page', 'page'], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions', 'post', 'teams/create-discussion-in-org', 'Create a discussion', 'rest/reference/teams#create-a-discussion', [], [['title', str], ['body', str], ['private', bool, False]], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}', 'get', 'teams/get-discussion-in-org', 'Get a discussion', 'rest/reference/teams#get-a-discussion', [], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}', 'patch', 'teams/update-discussion-in-org', 'Update a discussion', 'rest/reference/teams#update-a-discussion', [], [['title', str], ['body', str]], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}', 'delete', 'teams/delete-discussion-in-org', 'Delete a discussion', 'rest/reference/teams#delete-a-discussion', [], [], ''), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments', 'get', 'teams/list-discussion-comments-in-org', 'List discussion comments', 'rest/reference/teams#list-discussion-comments', ['direction', 'per_page', 'page'], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments', 'post', 'teams/create-discussion-comment-in-org', 'Create a discussion comment', 'rest/reference/teams#create-a-discussion-comment', [], [['body', str]], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments/{comment_number}', 'get', 'teams/get-discussion-comment-in-org', 'Get a discussion comment', 'rest/reference/teams#get-a-discussion-comment', [], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments/{comment_number}', 'patch', 'teams/update-discussion-comment-in-org', 'Update a discussion comment', 'rest/reference/teams#update-a-discussion-comment', [], [['body', str]], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments/{comment_number}', 'delete', 'teams/delete-discussion-comment-in-org', 'Delete a discussion comment', 'rest/reference/teams#delete-a-discussion-comment', [], [], ''), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments/{comment_number}/reactions', 'get', 'reactions/list-for-team-discussion-comment-in-org', 'List reactions for a team discussion comment', 'v3/reactions/#list-reactions-for-a-team-discussion-comment', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments/{comment_number}/reactions', 'post', 
'reactions/create-for-team-discussion-comment-in-org', 'Create reaction for a team discussion comment', 'v3/reactions/#create-reaction-for-a-team-discussion-comment', [], [['content', str]], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments/{comment_number}/reactions/{reaction_id}', 'delete', 'reactions/delete-for-team-discussion-comment', 'Delete team discussion comment reaction', 'v3/reactions/#delete-team-discussion-comment-reaction', [], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/reactions', 'get', 'reactions/list-for-team-discussion-in-org', 'List reactions for a team discussion', 'v3/reactions/#list-reactions-for-a-team-discussion', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/reactions', 'post', 'reactions/create-for-team-discussion-in-org', 'Create reaction for a team discussion', 'v3/reactions/#create-reaction-for-a-team-discussion', [], [['content', str]], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/reactions/{reaction_id}', 'delete', 'reactions/delete-for-team-discussion', 'Delete team discussion reaction', 'v3/reactions/#delete-team-discussion-reaction', [], [], 'squirrel-girl'), ('/orgs/{org}/teams/{team_slug}/invitations', 'get', 'teams/list-pending-invitations-in-org', 'List pending team invitations', 'rest/reference/teams#list-pending-team-invitations', ['per_page', 'page'], [], ''), ('/orgs/{org}/teams/{team_slug}/members', 'get', 'teams/list-members-in-org', 'List team members', 'rest/reference/teams#list-team-members', ['role', 'per_page', 'page'], [], ''), ('/orgs/{org}/teams/{team_slug}/memberships/{username}', 'get', 'teams/get-membership-for-user-in-org', 'Get team membership for a user', 'rest/reference/teams#get-team-membership-for-a-user', [], [], ''), ('/orgs/{org}/teams/{team_slug}/memberships/{username}', 'put', 'teams/add-or-update-membership-for-user-in-org', 'Add or update team membership for a user', 'rest/reference/teams#add-or-update-team-membership-for-a-user', [], [['role', str, 'member']], ''), ('/orgs/{org}/teams/{team_slug}/memberships/{username}', 'delete', 'teams/remove-membership-for-user-in-org', 'Remove team membership for a user', 'rest/reference/teams#remove-team-membership-for-a-user', [], [], ''), ('/orgs/{org}/teams/{team_slug}/projects', 'get', 'teams/list-projects-in-org', 'List team projects', 'v3/teams/#list-team-projects', ['per_page', 'page'], [], 'inertia'), ('/orgs/{org}/teams/{team_slug}/projects/{project_id}', 'get', 'teams/check-permissions-for-project-in-org', 'Check team permissions for a project', 'v3/teams/#check-team-permissions-for-a-project', [], [], 'inertia'), ('/orgs/{org}/teams/{team_slug}/projects/{project_id}', 'put', 'teams/add-or-update-project-permissions-in-org', 'Add or update team project permissions', 'v3/teams/#add-or-update-team-project-permissions', [], [['permission', str]], 'inertia'), ('/orgs/{org}/teams/{team_slug}/projects/{project_id}', 'delete', 'teams/remove-project-in-org', 'Remove a project from a team', 'v3/teams/#remove-a-project-from-a-team', [], [], ''), ('/orgs/{org}/teams/{team_slug}/repos', 'get', 'teams/list-repos-in-org', 'List team repositories', 'v3/teams/#list-team-repositories', ['per_page', 'page'], [], ''), ('/orgs/{org}/teams/{team_slug}/repos/{owner}/{repo}', 'get', 'teams/check-permissions-for-repo-in-org', 'Check team permissions for a repository', 
'v3/teams/#check-team-permissions-for-a-repository', [], [], ''), ('/orgs/{org}/teams/{team_slug}/repos/{owner}/{repo}', 'put', 'teams/add-or-update-repo-permissions-in-org', 'Add or update team repository permissions', 'v3/teams/#add-or-update-team-repository-permissions', [], [['permission', str]], ''), ('/orgs/{org}/teams/{team_slug}/repos/{owner}/{repo}', 'delete', 'teams/remove-repo-in-org', 'Remove a repository from a team', 'v3/teams/#remove-a-repository-from-a-team', [], [], ''), ('/orgs/{org}/teams/{team_slug}/team-sync/group-mappings', 'get', 'teams/list-idp-groups-in-org', 'List IdP groups for a team', 'rest/reference/teams#list-idp-groups-for-a-team', [], [], ''), ('/orgs/{org}/teams/{team_slug}/team-sync/group-mappings', 'patch', 'teams/create-or-update-idp-group-connections-in-org', 'Create or update IdP group connections', 'rest/reference/teams#create-or-update-idp-group-connections', [], [['groups', list]], ''), ('/orgs/{org}/teams/{team_slug}/teams', 'get', 'teams/list-child-in-org', 'List child teams', 'v3/teams/#list-child-teams', ['per_page', 'page'], [], ''), ('/projects/columns/cards/{card_id}', 'get', 'projects/get-card', 'Get a project card', 'rest/reference/projects#get-a-project-card', [], [], 'inertia'), ('/projects/columns/cards/{card_id}', 'patch', 'projects/update-card', 'Update an existing project card', 'rest/reference/projects#update-a-project-card', [], [['note', str], ['archived', bool]], 'inertia'), ('/projects/columns/cards/{card_id}', 'delete', 'projects/delete-card', 'Delete a project card', 'rest/reference/projects#delete-a-project-card', [], [], 'inertia'), ('/projects/columns/cards/{card_id}/moves', 'post', 'projects/move-card', 'Move a project card', 'rest/reference/projects#move-a-project-card', [], [['position', str], ['column_id', int]], 'inertia'), ('/projects/columns/{column_id}', 'get', 'projects/get-column', 'Get a project column', 'rest/reference/projects#get-a-project-column', [], [], 'inertia'), ('/projects/columns/{column_id}', 'patch', 'projects/update-column', 'Update an existing project column', 'rest/reference/projects#update-a-project-column', [], [['name', str]], 'inertia'), ('/projects/columns/{column_id}', 'delete', 'projects/delete-column', 'Delete a project column', 'rest/reference/projects#delete-a-project-column', [], [], 'inertia'), ('/projects/columns/{column_id}/cards', 'get', 'projects/list-cards', 'List project cards', 'rest/reference/projects#list-project-cards', ['archived_state', 'per_page', 'page'], [], 'inertia'), ('/projects/columns/{column_id}/cards', 'post', 'projects/create-card', 'Create a project card', 'rest/reference/projects#create-a-project-card', [], [], 'inertia'), ('/projects/columns/{column_id}/moves', 'post', 'projects/move-column', 'Move a project column', 'rest/reference/projects#move-a-project-column', [], [['position', str]], 'inertia'), ('/projects/{project_id}', 'get', 'projects/get', 'Get a project', 'v3/projects/#get-a-project', [], [], 'inertia'), ('/projects/{project_id}', 'patch', 'projects/update', 'Update a project', 'v3/projects/#update-a-project', [], [['name', str], ['body', str], ['state', str], ['organization_permission', str], ['private', bool]], 'inertia'), ('/projects/{project_id}', 'delete', 'projects/delete', 'Delete a project', 'v3/projects/#delete-a-project', [], [], 'inertia'), ('/projects/{project_id}/collaborators', 'get', 'projects/list-collaborators', 'List project collaborators', 'rest/reference/projects#list-project-collaborators', ['affiliation', 'per_page', 'page'], 
[], 'inertia'), ('/projects/{project_id}/collaborators/{username}', 'put', 'projects/add-collaborator', 'Add project collaborator', 'rest/reference/projects#add-project-collaborator', [], [['permission', str, 'write']], 'inertia'), ('/projects/{project_id}/collaborators/{username}', 'delete', 'projects/remove-collaborator', 'Remove user as a collaborator', 'rest/reference/projects#remove-project-collaborator', [], [], 'inertia'), ('/projects/{project_id}/collaborators/{username}/permission', 'get', 'projects/get-permission-for-user', 'Get project permission for a user', 'rest/reference/projects#get-project-permission-for-a-user', [], [], 'inertia'), ('/projects/{project_id}/columns', 'get', 'projects/list-columns', 'List project columns', 'rest/reference/projects#list-project-columns', ['per_page', 'page'], [], 'inertia'), ('/projects/{project_id}/columns', 'post', 'projects/create-column', 'Create a project column', 'rest/reference/projects#create-a-project-column', [], [['name', str]], 'inertia'), ('/rate_limit', 'get', 'rate-limit/get', 'Get rate limit status for the authenticated user', 'v3/rate_limit/#get-rate-limit-status-for-the-authenticated-user', [], [], ''), ('/reactions/{reaction_id}', 'delete', 'reactions/delete-legacy', 'Delete a reaction (Legacy)', 'v3/reactions/#delete-a-reaction-legacy', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}', 'get', 'repos/get', 'Get a repository', 'v3/repos/#get-a-repository', [], [], 'nebula'), ('/repos/{owner}/{repo}', 'patch', 'repos/update', 'Update a repository', 'v3/repos/#update-a-repository', [], [['name', str], ['description', str], ['homepage', str], ['private', bool, False], ['visibility', str], ['has_issues', bool, True], ['has_projects', bool, True], ['has_wiki', bool, True], ['is_template', bool, False], ['default_branch', str], ['allow_squash_merge', bool, True], ['allow_merge_commit', bool, True], ['allow_rebase_merge', bool, True], ['delete_branch_on_merge', bool, False], ['archived', bool, False]], 'nebula'), ('/repos/{owner}/{repo}', 'delete', 'repos/delete', 'Delete a repository', 'v3/repos/#delete-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/artifacts', 'get', 'actions/list-artifacts-for-repo', 'List artifacts for a repository', 'rest/reference/actions#list-artifacts-for-a-repository', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/artifacts/{artifact_id}', 'get', 'actions/get-artifact', 'Get an artifact', 'rest/reference/actions#get-an-artifact', [], [], ''), ('/repos/{owner}/{repo}/actions/artifacts/{artifact_id}', 'delete', 'actions/delete-artifact', 'Delete an artifact', 'rest/reference/actions#delete-an-artifact', [], [], ''), ('/repos/{owner}/{repo}/actions/artifacts/{artifact_id}/{archive_format}', 'get', 'actions/download-artifact', 'Download an artifact', 'rest/reference/actions#download-an-artifact', [], [], ''), ('/repos/{owner}/{repo}/actions/jobs/{job_id}', 'get', 'actions/get-job-for-workflow-run', 'Get a job for a workflow run', 'rest/reference/actions#get-a-job-for-a-workflow-run', [], [], ''), ('/repos/{owner}/{repo}/actions/jobs/{job_id}/logs', 'get', 'actions/download-job-logs-for-workflow-run', 'Download job logs for a workflow run', 'rest/reference/actions#download-job-logs-for-a-workflow-run', [], [], ''), ('/repos/{owner}/{repo}/actions/permissions', 'get', 'actions/get-github-actions-permissions-repository', 'Get GitHub Actions permissions for a repository', 'rest/reference/actions#get-github-actions-permissions-for-a-repository', [], [], ''), 
('/repos/{owner}/{repo}/actions/permissions', 'put', 'actions/set-github-actions-permissions-repository', 'Set GitHub Actions permissions for a repository', 'rest/reference/actions#set-github-actions-permissions-for-a-repository', [], [['enabled', bool], ['allowed_actions', str]], ''), ('/repos/{owner}/{repo}/actions/permissions/selected-actions', 'get', 'actions/get-allowed-actions-repository', 'Get allowed actions for a repository', 'rest/reference/actions#get-allowed-actions-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/permissions/selected-actions', 'put', 'actions/set-allowed-actions-repository', 'Set allowed actions for a repository', 'rest/reference/actions#set-allowed-actions-for-a-repository', [], [['github_owned_allowed', bool], ['verified_allowed', bool], ['patterns_allowed', list]], ''), ('/repos/{owner}/{repo}/actions/runners', 'get', 'actions/list-self-hosted-runners-for-repo', 'List self-hosted runners for a repository', 'rest/reference/actions#list-self-hosted-runners-for-a-repository', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/runners/downloads', 'get', 'actions/list-runner-applications-for-repo', 'List runner applications for a repository', 'rest/reference/actions#list-runner-applications-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/runners/registration-token', 'post', 'actions/create-registration-token-for-repo', 'Create a registration token for a repository', 'rest/reference/actions#create-a-registration-token-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/runners/remove-token', 'post', 'actions/create-remove-token-for-repo', 'Create a remove token for a repository', 'rest/reference/actions#create-a-remove-token-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/runners/{runner_id}', 'get', 'actions/get-self-hosted-runner-for-repo', 'Get a self-hosted runner for a repository', 'rest/reference/actions#get-a-self-hosted-runner-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/runners/{runner_id}', 'delete', 'actions/delete-self-hosted-runner-from-repo', 'Delete a self-hosted runner from a repository', 'rest/reference/actions#delete-a-self-hosted-runner-from-a-repository', [], [], ''), ('/repos/{owner}/{repo}/actions/runs', 'get', 'actions/list-workflow-runs-for-repo', 'List workflow runs for a repository', 'rest/reference/actions#list-workflow-runs-for-a-repository', ['actor', 'branch', 'event', 'status', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}', 'get', 'actions/get-workflow-run', 'Get a workflow run', 'rest/reference/actions#get-a-workflow-run', [], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}', 'delete', 'actions/delete-workflow-run', 'Delete a workflow run', 'rest/reference/actions#delete-a-workflow-run', [], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/artifacts', 'get', 'actions/list-workflow-run-artifacts', 'List workflow run artifacts', 'rest/reference/actions#list-workflow-run-artifacts', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/cancel', 'post', 'actions/cancel-workflow-run', 'Cancel a workflow run', 'rest/reference/actions#cancel-a-workflow-run', [], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/jobs', 'get', 'actions/list-jobs-for-workflow-run', 'List jobs for a workflow run', 'rest/reference/actions#list-jobs-for-a-workflow-run', ['filter', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/logs', 'get', 
'actions/download-workflow-run-logs', 'Download workflow run logs', 'rest/reference/actions#download-workflow-run-logs', [], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/logs', 'delete', 'actions/delete-workflow-run-logs', 'Delete workflow run logs', 'rest/reference/actions#delete-workflow-run-logs', [], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/rerun', 'post', 'actions/re-run-workflow', 'Re-run a workflow', 'rest/reference/actions#re-run-a-workflow', [], [], ''), ('/repos/{owner}/{repo}/actions/runs/{run_id}/timing', 'get', 'actions/get-workflow-run-usage', 'Get workflow run usage', 'rest/reference/actions#get-workflow-run-usage', [], [], ''), ('/repos/{owner}/{repo}/actions/secrets', 'get', 'actions/list-repo-secrets', 'List repository secrets', 'rest/reference/actions#list-repository-secrets', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/secrets/public-key', 'get', 'actions/get-repo-public-key', 'Get a repository public key', 'rest/reference/actions#get-a-repository-public-key', [], [], ''), ('/repos/{owner}/{repo}/actions/secrets/{secret_name}', 'get', 'actions/get-repo-secret', 'Get a repository secret', 'rest/reference/actions#get-a-repository-secret', [], [], ''), ('/repos/{owner}/{repo}/actions/secrets/{secret_name}', 'put', 'actions/create-or-update-repo-secret', 'Create or update a repository secret', 'rest/reference/actions#create-or-update-a-repository-secret', [], [['encrypted_value', str], ['key_id', str]], ''), ('/repos/{owner}/{repo}/actions/secrets/{secret_name}', 'delete', 'actions/delete-repo-secret', 'Delete a repository secret', 'rest/reference/actions#delete-a-repository-secret', [], [], ''), ('/repos/{owner}/{repo}/actions/workflows', 'get', 'actions/list-repo-workflows', 'List repository workflows', 'rest/reference/actions#list-repository-workflows', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/workflows/{workflow_id}', 'get', 'actions/get-workflow', 'Get a workflow', 'rest/reference/actions#get-a-workflow', [], [], ''), ('/repos/{owner}/{repo}/actions/workflows/{workflow_id}/disable', 'put', 'actions/disable-workflow', 'Disable a workflow', 'rest/reference/actions#disable-a-workflow', [], [], ''), ('/repos/{owner}/{repo}/actions/workflows/{workflow_id}/dispatches', 'post', 'actions/create-workflow-dispatch', 'Create a workflow dispatch event', 'rest/reference/actions#create-a-workflow-dispatch-event', [], [['ref', str], ['inputs', dict]], ''), ('/repos/{owner}/{repo}/actions/workflows/{workflow_id}/enable', 'put', 'actions/enable-workflow', 'Enable a workflow', 'rest/reference/actions#enable-a-workflow', [], [], ''), ('/repos/{owner}/{repo}/actions/workflows/{workflow_id}/runs', 'get', 'actions/list-workflow-runs', 'List workflow runs', 'rest/reference/actions#list-workflow-runs', ['actor', 'branch', 'event', 'status', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/actions/workflows/{workflow_id}/timing', 'get', 'actions/get-workflow-usage', 'Get workflow usage', 'rest/reference/actions#get-workflow-usage', [], [], ''), ('/repos/{owner}/{repo}/assignees', 'get', 'issues/list-assignees', 'List assignees', 'rest/reference/issues#list-assignees', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/assignees/{assignee}', 'get', 'issues/check-user-can-be-assigned', 'Check if a user can be assigned', 'rest/reference/issues#check-if-a-user-can-be-assigned', [], [], ''), ('/repos/{owner}/{repo}/automated-security-fixes', 'put', 'repos/enable-automated-security-fixes', 'Enable automated security fixes', 
'v3/repos/#enable-automated-security-fixes', [], [], 'london'), ('/repos/{owner}/{repo}/automated-security-fixes', 'delete', 'repos/disable-automated-security-fixes', 'Disable automated security fixes', 'v3/repos/#disable-automated-security-fixes', [], [], 'london'), ('/repos/{owner}/{repo}/branches', 'get', 'repos/list-branches', 'List branches', 'rest/reference/repos#list-branches', ['protected', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/branches/{branch}', 'get', 'repos/get-branch', 'Get a branch', 'rest/reference/repos#get-a-branch', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection', 'get', 'repos/get-branch-protection', 'Get branch protection', 'rest/reference/repos#get-branch-protection', [], [], 'luke-cage'), ('/repos/{owner}/{repo}/branches/{branch}/protection', 'put', 'repos/update-branch-protection', 'Update branch protection', 'rest/reference/repos#update-branch-protection', [], [['required_status_checks', dict], ['enforce_admins', bool], ['required_pull_request_reviews', dict], ['restrictions', dict], ['required_linear_history', bool], ['allow_force_pushes', bool], ['allow_deletions', bool]], 'luke-cage'), ('/repos/{owner}/{repo}/branches/{branch}/protection', 'delete', 'repos/delete-branch-protection', 'Delete branch protection', 'rest/reference/repos#delete-branch-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/enforce_admins', 'get', 'repos/get-admin-branch-protection', 'Get admin branch protection', 'rest/reference/repos#get-admin-branch-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/enforce_admins', 'post', 'repos/set-admin-branch-protection', 'Set admin branch protection', 'rest/reference/repos#set-admin-branch-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/enforce_admins', 'delete', 'repos/delete-admin-branch-protection', 'Delete admin branch protection', 'rest/reference/repos#delete-admin-branch-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_pull_request_reviews', 'get', 'repos/get-pull-request-review-protection', 'Get pull request review protection', 'rest/reference/repos#get-pull-request-review-protection', [], [], 'luke-cage'), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_pull_request_reviews', 'patch', 'repos/update-pull-request-review-protection', 'Update pull request review protection', 'rest/reference/repos#update-pull-request-review-protection', [], [['dismissal_restrictions', dict], ['dismiss_stale_reviews', bool], ['require_code_owner_reviews', bool], ['required_approving_review_count', int]], 'luke-cage'), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_pull_request_reviews', 'delete', 'repos/delete-pull-request-review-protection', 'Delete pull request review protection', 'rest/reference/repos#delete-pull-request-review-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_signatures', 'get', 'repos/get-commit-signature-protection', 'Get commit signature protection', 'rest/reference/repos#get-commit-signature-protection', [], [], 'zzzax'), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_signatures', 'post', 'repos/create-commit-signature-protection', 'Create commit signature protection', 'rest/reference/repos#create-commit-signature-protection', [], [], 'zzzax'), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_signatures', 'delete', 'repos/delete-commit-signature-protection', 'Delete commit signature 
protection', 'rest/reference/repos#delete-commit-signature-protection', [], [], 'zzzax'), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks', 'get', 'repos/get-status-checks-protection', 'Get status checks protection', 'rest/reference/repos#get-status-checks-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks', 'patch', 'repos/update-status-check-protection', 'Update status check protection', 'rest/reference/repos#update-status-check-protection', [], [['strict', bool], ['contexts', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks', 'delete', 'repos/remove-status-check-protection', 'Remove status check protection', 'rest/reference/repos#remove-status-check-protection', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks/contexts', 'get', 'repos/get-all-status-check-contexts', 'Get all status check contexts', 'rest/reference/repos#get-all-status-check-contexts', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks/contexts', 'post', 'repos/add-status-check-contexts', 'Add status check contexts', 'rest/reference/repos#add-status-check-contexts', [], [['contexts', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks/contexts', 'put', 'repos/set-status-check-contexts', 'Set status check contexts', 'rest/reference/repos#set-status-check-contexts', [], [['contexts', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/required_status_checks/contexts', 'delete', 'repos/remove-status-check-contexts', 'Remove status check contexts', 'rest/reference/repos#remove-status-check-contexts', [], [['contexts', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions', 'get', 'repos/get-access-restrictions', 'Get access restrictions', 'rest/reference/repos#get-access-restrictions', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions', 'delete', 'repos/delete-access-restrictions', 'Delete access restrictions', 'rest/reference/repos#delete-access-restrictions', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/apps', 'get', 'repos/get-apps-with-access-to-protected-branch', 'Get apps with access to the protected branch', 'rest/reference/repos#list-apps-with-access-to-the-protected-branch', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/apps', 'post', 'repos/add-app-access-restrictions', 'Add app access restrictions', 'rest/reference/repos#add-app-access-restrictions', [], [['apps', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/apps', 'put', 'repos/set-app-access-restrictions', 'Set app access restrictions', 'rest/reference/repos#set-app-access-restrictions', [], [['apps', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/apps', 'delete', 'repos/remove-app-access-restrictions', 'Remove app access restrictions', 'rest/reference/repos#remove-app-access-restrictions', [], [['apps', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/teams', 'get', 'repos/get-teams-with-access-to-protected-branch', 'Get teams with access to the protected branch', 'rest/reference/repos#list-teams-with-access-to-the-protected-branch', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/teams', 'post', 'repos/add-team-access-restrictions', 'Add team access restrictions', 
'rest/reference/repos#add-team-access-restrictions', [], [['teams', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/teams', 'put', 'repos/set-team-access-restrictions', 'Set team access restrictions', 'rest/reference/repos#set-team-access-restrictions', [], [['teams', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/teams', 'delete', 'repos/remove-team-access-restrictions', 'Remove team access restrictions', 'rest/reference/repos#remove-team-access-restrictions', [], [['teams', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/users', 'get', 'repos/get-users-with-access-to-protected-branch', 'Get users with access to the protected branch', 'rest/reference/repos#list-users-with-access-to-the-protected-branch', [], [], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/users', 'post', 'repos/add-user-access-restrictions', 'Add user access restrictions', 'rest/reference/repos#add-user-access-restrictions', [], [['users', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/users', 'put', 'repos/set-user-access-restrictions', 'Set user access restrictions', 'rest/reference/repos#set-user-access-restrictions', [], [['users', list]], ''), ('/repos/{owner}/{repo}/branches/{branch}/protection/restrictions/users', 'delete', 'repos/remove-user-access-restrictions', 'Remove user access restrictions', 'rest/reference/repos#remove-user-access-restrictions', [], [['users', list]], ''), ('/repos/{owner}/{repo}/check-runs', 'post', 'checks/create', 'Create a check run', 'rest/reference/checks#create-a-check-run', [], [['name', str], ['head_sha', str], ['details_url', str], ['external_id', str], ['status', str, 'queued'], ['started_at', str], ['conclusion', str], ['completed_at', str], ['output', dict], ['actions', list]], ''), ('/repos/{owner}/{repo}/check-runs/{check_run_id}', 'get', 'checks/get', 'Get a check run', 'rest/reference/checks#get-a-check-run', [], [], ''), ('/repos/{owner}/{repo}/check-runs/{check_run_id}', 'patch', 'checks/update', 'Update a check run', 'rest/reference/checks#update-a-check-run', [], [['name', str], ['details_url', str], ['external_id', str], ['started_at', str], ['status', str], ['conclusion', str], ['completed_at', str], ['output', dict], ['actions', list]], ''), ('/repos/{owner}/{repo}/check-runs/{check_run_id}/annotations', 'get', 'checks/list-annotations', 'List check run annotations', 'rest/reference/checks#list-check-run-annotations', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/check-suites', 'post', 'checks/create-suite', 'Create a check suite', 'rest/reference/checks#create-a-check-suite', [], [['head_sha', str]], ''), ('/repos/{owner}/{repo}/check-suites/preferences', 'patch', 'checks/set-suites-preferences', 'Update repository preferences for check suites', 'rest/reference/checks#update-repository-preferences-for-check-suites', [], [['auto_trigger_checks', list]], ''), ('/repos/{owner}/{repo}/check-suites/{check_suite_id}', 'get', 'checks/get-suite', 'Get a check suite', 'rest/reference/checks#get-a-check-suite', [], [], ''), ('/repos/{owner}/{repo}/check-suites/{check_suite_id}/check-runs', 'get', 'checks/list-for-suite', 'List check runs in a check suite', 'rest/reference/checks#list-check-runs-in-a-check-suite', ['check_name', 'status', 'filter', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/check-suites/{check_suite_id}/rerequest', 'post', 'checks/rerequest-suite', 'Rerequest a check suite', 
'rest/reference/checks#rerequest-a-check-suite', [], [], ''), ('/repos/{owner}/{repo}/code-scanning/alerts', 'get', 'code-scanning/list-alerts-for-repo', 'List code scanning alerts for a repository', 'v3/code-scanning/#list-code-scanning-alerts-for-a-repository', ['state', 'ref'], [], ''), ('/repos/{owner}/{repo}/code-scanning/alerts/{alert_number}', 'get', 'code-scanning/get-alert', 'Get a code scanning alert', 'v3/code-scanning/#get-a-code-scanning-alert', [], [], ''), ('/repos/{owner}/{repo}/code-scanning/alerts/{alert_number}', 'patch', 'code-scanning/update-alert', 'Update a code scanning alert', 'v3/code-scanning/#upload-a-code-scanning-alert', [], [['state', str], ['dismissed_reason', str]], ''), ('/repos/{owner}/{repo}/code-scanning/analyses', 'get', 'code-scanning/list-recent-analyses', 'List recent code scanning analyses for a repository', 'v3/code-scanning/#list-recent-analyses', ['ref', 'tool_name'], [], ''), ('/repos/{owner}/{repo}/code-scanning/sarifs', 'post', 'code-scanning/upload-sarif', 'Upload a SARIF file', 'v3/code-scanning/#upload-a-sarif-analysis', [], [['commit_sha', str], ['ref', str], ['sarif', str], ['checkout_uri', str], ['started_at', str], ['tool_name', str]], ''), ('/repos/{owner}/{repo}/collaborators', 'get', 'repos/list-collaborators', 'List repository collaborators', 'rest/reference/repos#list-repository-collaborators', ['affiliation', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/collaborators/{username}', 'get', 'repos/check-collaborator', 'Check if a user is a repository collaborator', 'rest/reference/repos#check-if-a-user-is-a-repository-collaborator', [], [], ''), ('/repos/{owner}/{repo}/collaborators/{username}', 'put', 'repos/add-collaborator', 'Add a repository collaborator', 'rest/reference/repos#add-a-repository-collaborator', [], [['permission', str, 'push'], ['permissions', str]], ''), ('/repos/{owner}/{repo}/collaborators/{username}', 'delete', 'repos/remove-collaborator', 'Remove a repository collaborator', 'rest/reference/repos#remove-a-repository-collaborator', [], [], ''), ('/repos/{owner}/{repo}/collaborators/{username}/permission', 'get', 'repos/get-collaborator-permission-level', 'Get repository permissions for a user', 'rest/reference/repos#get-repository-permissions-for-a-user', [], [], ''), ('/repos/{owner}/{repo}/comments', 'get', 'repos/list-commit-comments-for-repo', 'List commit comments for a repository', 'rest/reference/repos#list-commit-comments-for-a-repository', ['per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/comments/{comment_id}', 'get', 'repos/get-commit-comment', 'Get a commit comment', 'rest/reference/repos#get-a-commit-comment', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/comments/{comment_id}', 'patch', 'repos/update-commit-comment', 'Update a commit comment', 'rest/reference/repos#update-a-commit-comment', [], [['body', str]], ''), ('/repos/{owner}/{repo}/comments/{comment_id}', 'delete', 'repos/delete-commit-comment', 'Delete a commit comment', 'rest/reference/repos#delete-a-commit-comment', [], [], ''), ('/repos/{owner}/{repo}/comments/{comment_id}/reactions', 'get', 'reactions/list-for-commit-comment', 'List reactions for a commit comment', 'v3/reactions/#list-reactions-for-a-commit-comment', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/comments/{comment_id}/reactions', 'post', 'reactions/create-for-commit-comment', 'Create reaction for a commit comment', 'v3/reactions/#create-reaction-for-a-commit-comment', [], [['content', str]], 
'squirrel-girl'), ('/repos/{owner}/{repo}/comments/{comment_id}/reactions/{reaction_id}', 'delete', 'reactions/delete-for-commit-comment', 'Delete a commit comment reaction', 'v3/reactions/#delete-a-commit-comment-reaction', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/commits', 'get', 'repos/list-commits', 'List commits', 'rest/reference/repos#list-commits', ['sha', 'path', 'author', 'since', 'until', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/commits/{commit_sha}/branches-where-head', 'get', 'repos/list-branches-for-head-commit', 'List branches for HEAD commit', 'rest/reference/repos#list-branches-for-head-commit', [], [], 'groot'), ('/repos/{owner}/{repo}/commits/{commit_sha}/comments', 'get', 'repos/list-comments-for-commit', 'List commit comments', 'rest/reference/repos#list-commit-comments', ['per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/commits/{commit_sha}/comments', 'post', 'repos/create-commit-comment', 'Create a commit comment', 'rest/reference/repos#create-a-commit-comment', [], [['body', str], ['path', str], ['position', int], ['line', int]], ''), ('/repos/{owner}/{repo}/commits/{commit_sha}/pulls', 'get', 'repos/list-pull-requests-associated-with-commit', 'List pull requests associated with a commit', 'rest/reference/repos#list-pull-requests-associated-with-a-commit', ['per_page', 'page'], [], 'groot'), ('/repos/{owner}/{repo}/commits/{ref}', 'get', 'repos/get-commit', 'Get a commit', 'rest/reference/repos#get-a-commit', [], [], ''), ('/repos/{owner}/{repo}/commits/{ref}/check-runs', 'get', 'checks/list-for-ref', 'List check runs for a Git reference', 'rest/reference/checks#list-check-runs-for-a-git-reference', ['check_name', 'status', 'filter', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/commits/{ref}/check-suites', 'get', 'checks/list-suites-for-ref', 'List check suites for a Git reference', 'rest/reference/checks#list-check-suites-for-a-git-reference', ['app_id', 'check_name', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/commits/{ref}/status', 'get', 'repos/get-combined-status-for-ref', 'Get the combined status for a specific reference', 'rest/reference/repos#get-the-combined-status-for-a-specific-reference', [], [], ''), ('/repos/{owner}/{repo}/commits/{ref}/statuses', 'get', 'repos/list-commit-statuses-for-ref', 'List commit statuses for a reference', 'rest/reference/repos#list-commit-statuses-for-a-reference', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/community/code_of_conduct', 'get', 'codes-of-conduct/get-for-repo', 'Get the code of conduct for a repository', 'v3/codes_of_conduct/#get-the-code-of-conduct-for-a-repository', [], [], 'scarlet-witch'), ('/repos/{owner}/{repo}/community/profile', 'get', 'repos/get-community-profile-metrics', 'Get community profile metrics', 'rest/reference/repos#get-community-profile-metrics', [], [], ''), ('/repos/{owner}/{repo}/compare/{base}...{head}', 'get', 'repos/compare-commits', 'Compare two commits', 'rest/reference/repos#compare-two-commits', [], [], ''), ('/repos/{owner}/{repo}/contents/{path}', 'get', 'repos/get-content', 'Get repository content', 'rest/reference/repos#get-repository-content', ['ref'], [], ''), ('/repos/{owner}/{repo}/contents/{path}', 'put', 'repos/create-or-update-file-contents', 'Create or update file contents', 'rest/reference/repos#create-or-update-file-contents', [], [['message', str], ['content', str], ['sha', str], ['branch', str], ['committer', dict], ['author', dict]], ''), ('/repos/{owner}/{repo}/contents/{path}', 'delete', 
'repos/delete-file', 'Delete a file', 'rest/reference/repos#delete-a-file', [], [['message', str], ['sha', str], ['branch', str], ['committer', dict], ['author', dict]], ''), ('/repos/{owner}/{repo}/contributors', 'get', 'repos/list-contributors', 'List repository contributors', 'v3/repos/#list-repository-contributors', ['anon', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/deployments', 'get', 'repos/list-deployments', 'List deployments', 'rest/reference/repos#list-deployments', ['sha', 'ref', 'task', 'environment', 'per_page', 'page'], [], 'ant-man'), ('/repos/{owner}/{repo}/deployments', 'post', 'repos/create-deployment', 'Create a deployment', 'rest/reference/repos#create-a-deployment', [], [['ref', str], ['task', str, 'deploy'], ['auto_merge', bool, True], ['required_contexts', list], ['payload', str, ''], ['environment', str, 'production'], ['description', str, ''], ['transient_environment', bool, False], ['production_environment', bool], ['created_at', str]], 'ant-man'), ('/repos/{owner}/{repo}/deployments/{deployment_id}', 'get', 'repos/get-deployment', 'Get a deployment', 'rest/reference/repos#get-a-deployment', [], [], 'ant-man'), ('/repos/{owner}/{repo}/deployments/{deployment_id}', 'delete', 'repos/delete-deployment', 'Delete a deployment', 'rest/reference/repos#delete-a-deployment', [], [], ''), ('/repos/{owner}/{repo}/deployments/{deployment_id}/statuses', 'get', 'repos/list-deployment-statuses', 'List deployment statuses', 'rest/reference/repos#list-deployment-statuses', ['per_page', 'page'], [], 'flash'), ('/repos/{owner}/{repo}/deployments/{deployment_id}/statuses', 'post', 'repos/create-deployment-status', 'Create a deployment status', 'rest/reference/repos#create-a-deployment-status', [], [['state', str], ['target_url', str, ''], ['log_url', str, ''], ['description', str, ''], ['environment', str], ['environment_url', str, ''], ['auto_inactive', bool]], 'flash'), ('/repos/{owner}/{repo}/deployments/{deployment_id}/statuses/{status_id}', 'get', 'repos/get-deployment-status', 'Get a deployment status', 'rest/reference/repos#get-a-deployment-status', [], [], 'flash'), ('/repos/{owner}/{repo}/dispatches', 'post', 'repos/create-dispatch-event', 'Create a repository dispatch event', 'v3/repos/#create-a-repository-dispatch-event', [], [['event_type', str], ['client_payload', dict]], ''), ('/repos/{owner}/{repo}/events', 'get', 'activity/list-repo-events', 'List repository events', 'rest/reference/activity#list-repository-events', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/forks', 'get', 'repos/list-forks', 'List forks', 'rest/reference/repos#list-forks', ['sort', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/forks', 'post', 'repos/create-fork', 'Create a fork', 'rest/reference/repos#create-a-fork', [], [['organization', str]], ''), ('/repos/{owner}/{repo}/git/blobs', 'post', 'git/create-blob', 'Create a blob', 'rest/reference/git#create-a-blob', [], [['content', str], ['encoding', str, 'utf-8']], ''), ('/repos/{owner}/{repo}/git/blobs/{file_sha}', 'get', 'git/get-blob', 'Get a blob', 'rest/reference/git#get-a-blob', [], [], ''), ('/repos/{owner}/{repo}/git/commits', 'post', 'git/create-commit', 'Create a commit', 'rest/reference/git#create-a-commit', [], [['message', str], ['tree', str], ['parents', list], ['author', dict], ['committer', dict], ['signature', str]], ''), ('/repos/{owner}/{repo}/git/commits/{commit_sha}', 'get', 'git/get-commit', 'Get a commit', 'rest/reference/git#get-a-commit', [], [], ''), 
('/repos/{owner}/{repo}/git/matching-refs/{ref}', 'get', 'git/list-matching-refs', 'List matching references', 'rest/reference/git#list-matching-references', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/git/ref/{ref}', 'get', 'git/get-ref', 'Get a reference', 'rest/reference/git#get-a-reference', [], [], ''), ('/repos/{owner}/{repo}/git/refs', 'post', 'git/create-ref', 'Create a reference', 'rest/reference/git#create-a-reference', [], [['ref', str], ['sha', str], ['key', str]], ''), ('/repos/{owner}/{repo}/git/refs/{ref}', 'patch', 'git/update-ref', 'Update a reference', 'rest/reference/git#update-a-reference', [], [['sha', str], ['force', bool, False]], ''), ('/repos/{owner}/{repo}/git/refs/{ref}', 'delete', 'git/delete-ref', 'Delete a reference', 'rest/reference/git#delete-a-reference', [], [], ''), ('/repos/{owner}/{repo}/git/tags', 'post', 'git/create-tag', 'Create a tag object', 'rest/reference/git#create-a-tag-object', [], [['tag', str], ['message', str], ['object', str], ['type', str], ['tagger', dict]], ''), ('/repos/{owner}/{repo}/git/tags/{tag_sha}', 'get', 'git/get-tag', 'Get a tag', 'rest/reference/git#get-a-tag', [], [], ''), ('/repos/{owner}/{repo}/git/trees', 'post', 'git/create-tree', 'Create a tree', 'rest/reference/git#create-a-tree', [], [['tree', list], ['base_tree', str]], ''), ('/repos/{owner}/{repo}/git/trees/{tree_sha}', 'get', 'git/get-tree', 'Get a tree', 'rest/reference/git#get-a-tree', ['recursive'], [], ''), ('/repos/{owner}/{repo}/hooks', 'get', 'repos/list-webhooks', 'List repository webhooks', 'rest/reference/repos#list-repository-webhooks', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/hooks', 'post', 'repos/create-webhook', 'Create a repository webhook', 'rest/reference/repos#create-a-repository-webhook', [], [['name', str], ['config', dict], ['events', list, ['push']], ['active', bool, True]], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}', 'get', 'repos/get-webhook', 'Get a repository webhook', 'rest/reference/repos#get-a-repository-webhook', [], [], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}', 'patch', 'repos/update-webhook', 'Update a repository webhook', 'rest/reference/repos#update-a-repository-webhook', [], [['config', dict], ['events', list, ['push']], ['add_events', list], ['remove_events', list], ['active', bool, True]], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}', 'delete', 'repos/delete-webhook', 'Delete a repository webhook', 'rest/reference/repos#delete-a-repository-webhook', [], [], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}/config', 'get', 'repos/get-webhook-config-for-repo', 'Get a webhook configuration for a repository', 'v3/repos#get-a-webhook-configuration-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}/config', 'patch', 'repos/update-webhook-config-for-repo', 'Update a webhook configuration for a repository', 'v3/repos#update-a-webhook-configuration-for-a-repository', [], [['url', str], ['content_type', str], ['secret', str], ['insecure_ssl', str]], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}/pings', 'post', 'repos/ping-webhook', 'Ping a repository webhook', 'rest/reference/repos#ping-a-repository-webhook', [], [], ''), ('/repos/{owner}/{repo}/hooks/{hook_id}/tests', 'post', 'repos/test-push-webhook', 'Test the push repository webhook', 'rest/reference/repos#test-the-push-repository-webhook', [], [], ''), ('/repos/{owner}/{repo}/import', 'get', 'migrations/get-import-status', 'Get an import status', 'rest/reference/migrations#get-an-import-status', [], [], ''), 
('/repos/{owner}/{repo}/import', 'put', 'migrations/start-import', 'Start an import', 'rest/reference/migrations#start-an-import', [], [['vcs_url', str], ['vcs', str], ['vcs_username', str], ['vcs_password', str], ['tfvc_project', str]], ''), ('/repos/{owner}/{repo}/import', 'patch', 'migrations/update-import', 'Update an import', 'rest/reference/migrations#update-an-import', [], [['vcs_username', str], ['vcs_password', str], ['vcs', str], ['tfvc_project', str]], ''), ('/repos/{owner}/{repo}/import', 'delete', 'migrations/cancel-import', 'Cancel an import', 'rest/reference/migrations#cancel-an-import', [], [], ''), ('/repos/{owner}/{repo}/import/authors', 'get', 'migrations/get-commit-authors', 'Get commit authors', 'rest/reference/migrations#get-commit-authors', ['since'], [], ''), ('/repos/{owner}/{repo}/import/authors/{author_id}', 'patch', 'migrations/map-commit-author', 'Map a commit author', 'rest/reference/migrations#map-a-commit-author', [], [['email', str], ['name', str], ['remote_id', str]], ''), ('/repos/{owner}/{repo}/import/large_files', 'get', 'migrations/get-large-files', 'Get large files', 'rest/reference/migrations#get-large-files', [], [], ''), ('/repos/{owner}/{repo}/import/lfs', 'patch', 'migrations/set-lfs-preference', 'Update Git LFS preference', 'rest/reference/migrations#update-git-lfs-preference', [], [['use_lfs', str]], ''), ('/repos/{owner}/{repo}/installation', 'get', 'apps/get-repo-installation', 'Get a repository installation for the authenticated app', 'v3/apps/#get-a-repository-installation-for-the-authenticated-app', [], [], ''), ('/repos/{owner}/{repo}/interaction-limits', 'get', 'interactions/get-restrictions-for-repo', 'Get interaction restrictions for a repository', 'rest/reference/interactions#get-interaction-restrictions-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/interaction-limits', 'put', 'interactions/set-restrictions-for-repo', 'Set interaction restrictions for a repository', 'rest/reference/interactions#set-interaction-restrictions-for-a-repository', [], [['limit', str], ['expiry', str]], ''), ('/repos/{owner}/{repo}/interaction-limits', 'delete', 'interactions/remove-restrictions-for-repo', 'Remove interaction restrictions for a repository', 'rest/reference/interactions#remove-interaction-restrictions-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/invitations', 'get', 'repos/list-invitations', 'List repository invitations', 'rest/reference/repos#list-repository-invitations', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/invitations/{invitation_id}', 'patch', 'repos/update-invitation', 'Update a repository invitation', 'rest/reference/repos#update-a-repository-invitation', [], [['permissions', str]], ''), ('/repos/{owner}/{repo}/invitations/{invitation_id}', 'delete', 'repos/delete-invitation', 'Delete a repository invitation', 'rest/reference/repos#delete-a-repository-invitation', [], [], ''), ('/repos/{owner}/{repo}/issues', 'get', 'issues/list-for-repo', 'List repository issues', 'v3/issues/#list-repository-issues', ['milestone', 'state', 'assignee', 'creator', 'mentioned', 'labels', 'sort', 'direction', 'since', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues', 'post', 'issues/create', 'Create an issue', 'v3/issues/#create-an-issue', [], [['title', str], ['body', str], ['assignee', str], ['milestone', int], ['labels', list], ['assignees', list]], ''), ('/repos/{owner}/{repo}/issues/comments', 'get', 'issues/list-comments-for-repo', 'List issue comments for a repository', 
'rest/reference/issues#list-issue-comments-for-a-repository', ['sort', 'direction', 'since', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/comments/{comment_id}', 'get', 'issues/get-comment', 'Get an issue comment', 'rest/reference/issues#get-an-issue-comment', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/comments/{comment_id}', 'patch', 'issues/update-comment', 'Update an issue comment', 'rest/reference/issues#update-an-issue-comment', [], [['body', str]], ''), ('/repos/{owner}/{repo}/issues/comments/{comment_id}', 'delete', 'issues/delete-comment', 'Delete an issue comment', 'rest/reference/issues#delete-an-issue-comment', [], [], ''), ('/repos/{owner}/{repo}/issues/comments/{comment_id}/reactions', 'get', 'reactions/list-for-issue-comment', 'List reactions for an issue comment', 'v3/reactions/#list-reactions-for-an-issue-comment', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/comments/{comment_id}/reactions', 'post', 'reactions/create-for-issue-comment', 'Create reaction for an issue comment', 'v3/reactions/#create-reaction-for-an-issue-comment', [], [['content', str]], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/comments/{comment_id}/reactions/{reaction_id}', 'delete', 'reactions/delete-for-issue-comment', 'Delete an issue comment reaction', 'v3/reactions/#delete-an-issue-comment-reaction', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/events', 'get', 'issues/list-events-for-repo', 'List issue events for a repository', 'rest/reference/issues#list-issue-events-for-a-repository', ['per_page', 'page'], [], 'starfox'), ('/repos/{owner}/{repo}/issues/events/{event_id}', 'get', 'issues/get-event', 'Get an issue event', 'rest/reference/issues#get-an-issue-event', [], [], 'starfox'), ('/repos/{owner}/{repo}/issues/{issue_number}', 'get', 'issues/get', 'Get an issue', 'v3/issues/#get-an-issue', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/{issue_number}', 'patch', 'issues/update', 'Update an issue', 'v3/issues/#update-an-issue', [], [['title', str], ['body', str], ['assignee', str], ['state', str], ['milestone', int], ['labels', list], ['assignees', list]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/assignees', 'post', 'issues/add-assignees', 'Add assignees to an issue', 'rest/reference/issues#add-assignees-to-an-issue', [], [['assignees', list]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/assignees', 'delete', 'issues/remove-assignees', 'Remove assignees from an issue', 'rest/reference/issues#remove-assignees-from-an-issue', [], [['assignees', list]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/comments', 'get', 'issues/list-comments', 'List issue comments', 'rest/reference/issues#list-issue-comments', ['since', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/{issue_number}/comments', 'post', 'issues/create-comment', 'Create an issue comment', 'rest/reference/issues#create-an-issue-comment', [], [['body', str]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/events', 'get', 'issues/list-events', 'List issue events', 'rest/reference/issues#list-issue-events', ['per_page', 'page'], [], 'starfox'), ('/repos/{owner}/{repo}/issues/{issue_number}/labels', 'get', 'issues/list-labels-on-issue', 'List labels for an issue', 'rest/reference/issues#list-labels-for-an-issue', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/labels', 'post', 'issues/add-labels', 'Add labels to an issue', 
'rest/reference/issues#add-labels-to-an-issue', [], [['labels', list]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/labels', 'put', 'issues/set-labels', 'Set labels for an issue', 'rest/reference/issues#set-labels-for-an-issue', [], [['labels', list]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/labels', 'delete', 'issues/remove-all-labels', 'Remove all labels from an issue', 'rest/reference/issues#remove-all-labels-from-an-issue', [], [], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/labels/{name}', 'delete', 'issues/remove-label', 'Remove a label from an issue', 'rest/reference/issues#remove-a-label-from-an-issue', [], [], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/lock', 'put', 'issues/lock', 'Lock an issue', 'v3/issues/#lock-an-issue', [], [['lock_reason', str]], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/lock', 'delete', 'issues/unlock', 'Unlock an issue', 'v3/issues/#unlock-an-issue', [], [], ''), ('/repos/{owner}/{repo}/issues/{issue_number}/reactions', 'get', 'reactions/list-for-issue', 'List reactions for an issue', 'v3/reactions/#list-reactions-for-an-issue', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/{issue_number}/reactions', 'post', 'reactions/create-for-issue', 'Create reaction for an issue', 'v3/reactions/#create-reaction-for-an-issue', [], [['content', str]], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/{issue_number}/reactions/{reaction_id}', 'delete', 'reactions/delete-for-issue', 'Delete an issue reaction', 'v3/reactions/#delete-an-issue-reaction', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/issues/{issue_number}/timeline', 'get', 'issues/list-events-for-timeline', 'List timeline events for an issue', 'rest/reference/issues#list-timeline-events-for-an-issue', ['per_page', 'page'], [], 'mockingbird'), ('/repos/{owner}/{repo}/keys', 'get', 'repos/list-deploy-keys', 'List deploy keys', 'rest/reference/repos#list-deploy-keys', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/keys', 'post', 'repos/create-deploy-key', 'Create a deploy key', 'rest/reference/repos#create-a-deploy-key', [], [['title', str], ['key', str], ['read_only', bool]], ''), ('/repos/{owner}/{repo}/keys/{key_id}', 'get', 'repos/get-deploy-key', 'Get a deploy key', 'rest/reference/repos#get-a-deploy-key', [], [], ''), ('/repos/{owner}/{repo}/keys/{key_id}', 'delete', 'repos/delete-deploy-key', 'Delete a deploy key', 'rest/reference/repos#delete-a-deploy-key', [], [], ''), ('/repos/{owner}/{repo}/labels', 'get', 'issues/list-labels-for-repo', 'List labels for a repository', 'rest/reference/issues#list-labels-for-a-repository', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/labels', 'post', 'issues/create-label', 'Create a label', 'rest/reference/issues#create-a-label', [], [['name', str], ['color', str], ['description', str]], ''), ('/repos/{owner}/{repo}/labels/{name}', 'get', 'issues/get-label', 'Get a label', 'rest/reference/issues#get-a-label', [], [], ''), ('/repos/{owner}/{repo}/labels/{name}', 'patch', 'issues/update-label', 'Update a label', 'rest/reference/issues#update-a-label', [], [['new_name', str], ['color', str], ['description', str]], ''), ('/repos/{owner}/{repo}/labels/{name}', 'delete', 'issues/delete-label', 'Delete a label', 'rest/reference/issues#delete-a-label', [], [], ''), ('/repos/{owner}/{repo}/languages', 'get', 'repos/list-languages', 'List repository languages', 'v3/repos/#list-repository-languages', [], [], ''), ('/repos/{owner}/{repo}/license', 'get', 'licenses/get-for-repo', 
'Get the license for a repository', 'v3/licenses/#get-the-license-for-a-repository', [], [], ''), ('/repos/{owner}/{repo}/merges', 'post', 'repos/merge', 'Merge a branch', 'rest/reference/repos#merge-a-branch', [], [['base', str], ['head', str], ['commit_message', str]], ''), ('/repos/{owner}/{repo}/milestones', 'get', 'issues/list-milestones', 'List milestones', 'rest/reference/issues#list-milestones', ['state', 'sort', 'direction', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/milestones', 'post', 'issues/create-milestone', 'Create a milestone', 'rest/reference/issues#create-a-milestone', [], [['title', str], ['state', str, 'open'], ['description', str], ['due_on', str]], ''), ('/repos/{owner}/{repo}/milestones/{milestone_number}', 'get', 'issues/get-milestone', 'Get a milestone', 'rest/reference/issues#get-a-milestone', [], [], ''), ('/repos/{owner}/{repo}/milestones/{milestone_number}', 'patch', 'issues/update-milestone', 'Update a milestone', 'rest/reference/issues#update-a-milestone', [], [['title', str], ['state', str, 'open'], ['description', str], ['due_on', str]], ''), ('/repos/{owner}/{repo}/milestones/{milestone_number}', 'delete', 'issues/delete-milestone', 'Delete a milestone', 'rest/reference/issues#delete-a-milestone', [], [], ''), ('/repos/{owner}/{repo}/milestones/{milestone_number}/labels', 'get', 'issues/list-labels-for-milestone', 'List labels for issues in a milestone', 'rest/reference/issues#list-labels-for-issues-in-a-milestone', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/notifications', 'get', 'activity/list-repo-notifications-for-authenticated-user', 'List repository notifications for the authenticated user', 'rest/reference/activity#list-repository-notifications-for-the-authenticated-user', ['all', 'participating', 'since', 'before', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/notifications', 'put', 'activity/mark-repo-notifications-as-read', 'Mark repository notifications as read', 'rest/reference/activity#mark-repository-notifications-as-read', [], [['last_read_at', str]], ''), ('/repos/{owner}/{repo}/pages', 'get', 'repos/get-pages', 'Get a GitHub Pages site', 'rest/reference/repos#get-a-github-pages-site', [], [], ''), ('/repos/{owner}/{repo}/pages', 'post', 'repos/create-pages-site', 'Create a GitHub Pages site', 'rest/reference/repos#create-a-github-pages-site', [], [['source', dict]], 'switcheroo'), ('/repos/{owner}/{repo}/pages', 'put', 'repos/update-information-about-pages-site', 'Update information about a GitHub Pages site', 'rest/reference/repos#update-information-about-a-github-pages-site', [], [['cname', str], ['source', object]], ''), ('/repos/{owner}/{repo}/pages', 'delete', 'repos/delete-pages-site', 'Delete a GitHub Pages site', 'rest/reference/repos#delete-a-github-pages-site', [], [], 'switcheroo'), ('/repos/{owner}/{repo}/pages/builds', 'get', 'repos/list-pages-builds', 'List GitHub Pages builds', 'rest/reference/repos#list-github-pages-builds', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pages/builds', 'post', 'repos/request-pages-build', 'Request a GitHub Pages build', 'rest/reference/repos#request-a-github-pages-build', [], [], ''), ('/repos/{owner}/{repo}/pages/builds/latest', 'get', 'repos/get-latest-pages-build', 'Get latest Pages build', 'rest/reference/repos#get-latest-pages-build', [], [], ''), ('/repos/{owner}/{repo}/pages/builds/{build_id}', 'get', 'repos/get-pages-build', 'Get GitHub Pages build', 'rest/reference/repos#get-github-pages-build', [], [], ''), ('/repos/{owner}/{repo}/projects', 
'get', 'projects/list-for-repo', 'List repository projects', 'v3/projects/#list-repository-projects', ['state', 'per_page', 'page'], [], 'inertia'), ('/repos/{owner}/{repo}/projects', 'post', 'projects/create-for-repo', 'Create a repository project', 'v3/projects/#create-a-repository-project', [], [['name', str], ['body', str]], 'inertia'), ('/repos/{owner}/{repo}/pulls', 'get', 'pulls/list', 'List pull requests', 'v3/pulls/#list-pull-requests', ['state', 'head', 'base', 'sort', 'direction', 'per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pulls', 'post', 'pulls/create', 'Create a pull request', 'v3/pulls/#create-a-pull-request', [], [['title', str], ['head', str], ['base', str], ['body', str], ['maintainer_can_modify', bool], ['draft', bool], ['issue', int]], ''), ('/repos/{owner}/{repo}/pulls/comments', 'get', 'pulls/list-review-comments-for-repo', 'List review comments in a repository', 'rest/reference/pulls#list-review-comments-in-a-repository', ['sort', 'direction', 'since', 'per_page', 'page'], [], 'comfort-fade'), ('/repos/{owner}/{repo}/pulls/comments/{comment_id}', 'get', 'pulls/get-review-comment', 'Get a review comment for a pull request', 'rest/reference/pulls#get-a-review-comment-for-a-pull-request', [], [], 'comfort-fade'), ('/repos/{owner}/{repo}/pulls/comments/{comment_id}', 'patch', 'pulls/update-review-comment', 'Update a review comment for a pull request', 'rest/reference/pulls#update-a-review-comment-for-a-pull-request', [], [['body', str]], 'comfort-fade'), ('/repos/{owner}/{repo}/pulls/comments/{comment_id}', 'delete', 'pulls/delete-review-comment', 'Delete a review comment for a pull request', 'rest/reference/pulls#delete-a-review-comment-for-a-pull-request', [], [], ''), ('/repos/{owner}/{repo}/pulls/comments/{comment_id}/reactions', 'get', 'reactions/list-for-pull-request-review-comment', 'List reactions for a pull request review comment', 'v3/reactions/#list-reactions-for-a-pull-request-review-comment', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/pulls/comments/{comment_id}/reactions', 'post', 'reactions/create-for-pull-request-review-comment', 'Create reaction for a pull request review comment', 'v3/reactions/#create-reaction-for-a-pull-request-review-comment', [], [['content', str]], 'squirrel-girl'), ('/repos/{owner}/{repo}/pulls/comments/{comment_id}/reactions/{reaction_id}', 'delete', 'reactions/delete-for-pull-request-comment', 'Delete a pull request comment reaction', 'v3/reactions/#delete-a-pull-request-comment-reaction', [], [], 'squirrel-girl'), ('/repos/{owner}/{repo}/pulls/{pull_number}', 'get', 'pulls/get', 'Get a pull request', 'v3/pulls/#get-a-pull-request', [], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}', 'patch', 'pulls/update', 'Update a pull request', 'v3/pulls/#update-a-pull-request', [], [['title', str], ['body', str], ['state', str], ['base', str], ['maintainer_can_modify', bool]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/comments', 'get', 'pulls/list-review-comments', 'List review comments on a pull request', 'rest/reference/pulls#list-review-comments-on-a-pull-request', ['sort', 'direction', 'since', 'per_page', 'page'], [], 'comfort-fade'), ('/repos/{owner}/{repo}/pulls/{pull_number}/comments', 'post', 'pulls/create-review-comment', 'Create a review comment for a pull request', 'rest/reference/pulls#create-a-review-comment-for-a-pull-request', [], [['body', str], ['commit_id', str], ['path', str], ['position', int], ['side', str], ['line', int], ['start_line', int], ['start_side', 
str], ['in_reply_to', int]], 'comfort-fade'), ('/repos/{owner}/{repo}/pulls/{pull_number}/comments/{comment_id}/replies', 'post', 'pulls/create-reply-for-review-comment', 'Create a reply for a review comment', 'rest/reference/pulls#create-a-reply-for-a-review-comment', [], [['body', str]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/commits', 'get', 'pulls/list-commits', 'List commits on a pull request', 'v3/pulls/#list-commits-on-a-pull-request', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/files', 'get', 'pulls/list-files', 'List pull requests files', 'v3/pulls/#list-pull-requests-files', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/merge', 'get', 'pulls/check-if-merged', 'Check if a pull request has been merged', 'v3/pulls/#check-if-a-pull-request-has-been-merged', [], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/merge', 'put', 'pulls/merge', 'Merge a pull request', 'v3/pulls/#merge-a-pull-request', [], [['commit_title', str], ['commit_message', str], ['sha', str], ['merge_method', str]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/requested_reviewers', 'get', 'pulls/list-requested-reviewers', 'List requested reviewers for a pull request', 'rest/reference/pulls#list-requested-reviewers-for-a-pull-request', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/requested_reviewers', 'post', 'pulls/request-reviewers', 'Request reviewers for a pull request', 'rest/reference/pulls#request-reviewers-for-a-pull-request', [], [['reviewers', list], ['team_reviewers', list]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/requested_reviewers', 'delete', 'pulls/remove-requested-reviewers', 'Remove requested reviewers from a pull request', 'rest/reference/pulls#remove-requested-reviewers-from-a-pull-request', [], [['reviewers', list], ['team_reviewers', list]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews', 'get', 'pulls/list-reviews', 'List reviews for a pull request', 'rest/reference/pulls#list-reviews-for-a-pull-request', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews', 'post', 'pulls/create-review', 'Create a review for a pull request', 'rest/reference/pulls#create-a-review-for-a-pull-request', [], [['commit_id', str], ['body', str], ['event', str], ['comments', list]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews/{review_id}', 'get', 'pulls/get-review', 'Get a review for a pull request', 'rest/reference/pulls#get-a-review-for-a-pull-request', [], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews/{review_id}', 'put', 'pulls/update-review', 'Update a review for a pull request', 'rest/reference/pulls#update-a-review-for-a-pull-request', [], [['body', str]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews/{review_id}', 'delete', 'pulls/delete-pending-review', 'Delete a pending review for a pull request', 'rest/reference/pulls#delete-a-pending-review-for-a-pull-request', [], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews/{review_id}/comments', 'get', 'pulls/list-comments-for-review', 'List comments for a pull request review', 'rest/reference/pulls#list-comments-for-a-pull-request-review', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/reviews/{review_id}/dismissals', 'put', 'pulls/dismiss-review', 'Dismiss a review for a pull request', 'rest/reference/pulls#dismiss-a-review-for-a-pull-request', [], [['message', str], ['event', str]], ''), 
('/repos/{owner}/{repo}/pulls/{pull_number}/reviews/{review_id}/events', 'post', 'pulls/submit-review', 'Submit a review for a pull request', 'rest/reference/pulls#submit-a-review-for-a-pull-request', [], [['body', str], ['event', str]], ''), ('/repos/{owner}/{repo}/pulls/{pull_number}/update-branch', 'put', 'pulls/update-branch', 'Update a pull request branch', 'v3/pulls/#update-a-pull-request-branch', [], [['expected_head_sha', str]], 'lydian'), ('/repos/{owner}/{repo}/readme', 'get', 'repos/get-readme', 'Get a repository README', 'rest/reference/repos#get-a-repository-readme', ['ref'], [], ''), ('/repos/{owner}/{repo}/releases', 'get', 'repos/list-releases', 'List releases', 'rest/reference/repos#list-releases', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/releases', 'post', 'repos/create-release', 'Create a release', 'rest/reference/repos#create-a-release', [], [['tag_name', str], ['target_commitish', str], ['name', str], ['body', str], ['draft', bool, False], ['prerelease', bool, False]], ''), ('/repos/{owner}/{repo}/releases/assets/{asset_id}', 'get', 'repos/get-release-asset', 'Get a release asset', 'rest/reference/repos#get-a-release-asset', [], [], ''), ('/repos/{owner}/{repo}/releases/assets/{asset_id}', 'patch', 'repos/update-release-asset', 'Update a release asset', 'rest/reference/repos#update-a-release-asset', [], [['name', str], ['label', str], ['state', str]], ''), ('/repos/{owner}/{repo}/releases/assets/{asset_id}', 'delete', 'repos/delete-release-asset', 'Delete a release asset', 'rest/reference/repos#delete-a-release-asset', [], [], ''), ('/repos/{owner}/{repo}/releases/latest', 'get', 'repos/get-latest-release', 'Get the latest release', 'rest/reference/repos#get-the-latest-release', [], [], ''), ('/repos/{owner}/{repo}/releases/tags/{tag}', 'get', 'repos/get-release-by-tag', 'Get a release by tag name', 'rest/reference/repos#get-a-release-by-tag-name', [], [], ''), ('/repos/{owner}/{repo}/releases/{release_id}', 'get', 'repos/get-release', 'Get a release', 'rest/reference/repos#get-a-release', [], [], ''), ('/repos/{owner}/{repo}/releases/{release_id}', 'patch', 'repos/update-release', 'Update a release', 'rest/reference/repos#update-a-release', [], [['tag_name', str], ['target_commitish', str], ['name', str], ['body', str], ['draft', bool], ['prerelease', bool]], ''), ('/repos/{owner}/{repo}/releases/{release_id}', 'delete', 'repos/delete-release', 'Delete a release', 'rest/reference/repos#delete-a-release', [], [], ''), ('/repos/{owner}/{repo}/releases/{release_id}/assets', 'get', 'repos/list-release-assets', 'List release assets', 'rest/reference/repos#list-release-assets', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/releases/{release_id}/assets', 'post', 'repos/upload-release-asset', 'Upload a release asset', 'rest/reference/repos#upload-a-release-asset', ['name', 'label'], [], ''), ('/repos/{owner}/{repo}/secret-scanning/alerts', 'get', 'secret-scanning/list-alerts-for-repo', 'List secret scanning alerts for a repository', 'rest/reference/secret-scanning#list-secret-scanning-alerts-for-a-repository', ['state', 'page', 'per_page'], [], ''), ('/repos/{owner}/{repo}/secret-scanning/alerts/{alert_number}', 'get', 'secret-scanning/get-alert', 'Get a secret scanning alert', 'rest/reference/secret-scanning#get-a-secret-scanning-alert', [], [], ''), ('/repos/{owner}/{repo}/secret-scanning/alerts/{alert_number}', 'patch', 'secret-scanning/update-alert', 'Update a secret scanning alert', 'rest/reference/secret-scanning#update-a-secret-scanning-alert', 
[], [['state', str], ['resolution', str]], ''), ('/repos/{owner}/{repo}/stargazers', 'get', 'activity/list-stargazers-for-repo', 'List stargazers', 'rest/reference/activity#list-stargazers', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/stats/code_frequency', 'get', 'repos/get-code-frequency-stats', 'Get the weekly commit activity', 'rest/reference/repos#get-the-weekly-commit-activity', [], [], ''), ('/repos/{owner}/{repo}/stats/commit_activity', 'get', 'repos/get-commit-activity-stats', 'Get the last year of commit activity', 'rest/reference/repos#get-the-last-year-of-commit-activity', [], [], ''), ('/repos/{owner}/{repo}/stats/contributors', 'get', 'repos/get-contributors-stats', 'Get all contributor commit activity', 'rest/reference/repos#get-all-contributor-commit-activity', [], [], ''), ('/repos/{owner}/{repo}/stats/participation', 'get', 'repos/get-participation-stats', 'Get the weekly commit count', 'rest/reference/repos#get-the-weekly-commit-count', [], [], ''), ('/repos/{owner}/{repo}/stats/punch_card', 'get', 'repos/get-punch-card-stats', 'Get the hourly commit count for each day', 'rest/reference/repos#get-the-hourly-commit-count-for-each-day', [], [], ''), ('/repos/{owner}/{repo}/statuses/{sha}', 'post', 'repos/create-commit-status', 'Create a commit status', 'rest/reference/repos#create-a-commit-status', [], [['state', str], ['target_url', str], ['description', str], ['context', str, 'default']], ''), ('/repos/{owner}/{repo}/subscribers', 'get', 'activity/list-watchers-for-repo', 'List watchers', 'rest/reference/activity#list-watchers', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/subscription', 'get', 'activity/get-repo-subscription', 'Get a repository subscription', 'rest/reference/activity#get-a-repository-subscription', [], [], ''), ('/repos/{owner}/{repo}/subscription', 'put', 'activity/set-repo-subscription', 'Set a repository subscription', 'rest/reference/activity#set-a-repository-subscription', [], [['subscribed', bool], ['ignored', bool]], ''), ('/repos/{owner}/{repo}/subscription', 'delete', 'activity/delete-repo-subscription', 'Delete a repository subscription', 'rest/reference/activity#delete-a-repository-subscription', [], [], ''), ('/repos/{owner}/{repo}/tags', 'get', 'repos/list-tags', 'List repository tags', 'v3/repos/#list-repository-tags', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/tarball/{ref}', 'get', 'repos/download-tarball-archive', 'Download a repository archive (tar)', 'rest/reference/repos#download-a-repository-archive', [], [], ''), ('/repos/{owner}/{repo}/teams', 'get', 'repos/list-teams', 'List repository teams', 'v3/repos/#list-repository-teams', ['per_page', 'page'], [], ''), ('/repos/{owner}/{repo}/topics', 'get', 'repos/get-all-topics', 'Get all repository topics', 'v3/repos/#get-all-repository-topics', [], [], 'mercy'), ('/repos/{owner}/{repo}/topics', 'put', 'repos/replace-all-topics', 'Replace all repository topics', 'v3/repos/#replace-all-repository-topics', [], [['names', list]], 'mercy'), ('/repos/{owner}/{repo}/traffic/clones', 'get', 'repos/get-clones', 'Get repository clones', 'rest/reference/repos#get-repository-clones', ['per'], [], ''), ('/repos/{owner}/{repo}/traffic/popular/paths', 'get', 'repos/get-top-paths', 'Get top referral paths', 'rest/reference/repos#get-top-referral-paths', [], [], ''), ('/repos/{owner}/{repo}/traffic/popular/referrers', 'get', 'repos/get-top-referrers', 'Get top referral sources', 'rest/reference/repos#get-top-referral-sources', [], [], ''), 
('/repos/{owner}/{repo}/traffic/views', 'get', 'repos/get-views', 'Get page views', 'rest/reference/repos#get-page-views', ['per'], [], ''), ('/repos/{owner}/{repo}/transfer', 'post', 'repos/transfer', 'Transfer a repository', 'v3/repos/#transfer-a-repository', [], [['new_owner', str], ['team_ids', list]], ''), ('/repos/{owner}/{repo}/vulnerability-alerts', 'get', 'repos/check-vulnerability-alerts', 'Check if vulnerability alerts are enabled for a repository', 'v3/repos/#check-if-vulnerability-alerts-are-enabled-for-a-repository', [], [], 'dorian'), ('/repos/{owner}/{repo}/vulnerability-alerts', 'put', 'repos/enable-vulnerability-alerts', 'Enable vulnerability alerts', 'v3/repos/#enable-vulnerability-alerts', [], [], 'dorian'), ('/repos/{owner}/{repo}/vulnerability-alerts', 'delete', 'repos/disable-vulnerability-alerts', 'Disable vulnerability alerts', 'v3/repos/#disable-vulnerability-alerts', [], [], 'dorian'), ('/repos/{owner}/{repo}/zipball/{ref}', 'get', 'repos/download-zipball-archive', 'Download a repository archive (zip)', 'rest/reference/repos#download-a-repository-archive', [], [], ''), ('/repos/{template_owner}/{template_repo}/generate', 'post', 'repos/create-using-template', 'Create a repository using a template', 'v3/repos/#create-a-repository-using-a-template', [], [['owner', str], ['name', str], ['description', str], ['include_all_branches', bool, False], ['private', bool, False]], 'baptiste'), ('/repositories', 'get', 'repos/list-public', 'List public repositories', 'v3/repos/#list-public-repositories', ['since'], [], ''), ('/scim/v2/enterprises/{enterprise}/Groups', 'get', 'enterprise-admin/list-provisioned-groups-enterprise', 'List provisioned SCIM groups for an enterprise', 'rest/reference/enterprise-admin#list-provisioned-scim-groups-for-an-enterprise', ['startIndex', 'count'], [], ''), ('/scim/v2/enterprises/{enterprise}/Groups', 'post', 'enterprise-admin/provision-and-invite-enterprise-group', 'Provision a SCIM enterprise group and invite users', 'rest/reference/enterprise-admin#provision-a-scim-enterprise-group-and-invite-users', [], [['schemas', list], ['displayName', str], ['members', list]], ''), ('/scim/v2/enterprises/{enterprise}/Groups/{scim_group_id}', 'get', 'enterprise-admin/get-provisioning-information-for-enterprise-group', 'Get SCIM provisioning information for an enterprise group', 'rest/reference/enterprise-admin#get-scim-provisioning-information-for-an-enterprise-group', [], [], ''), ('/scim/v2/enterprises/{enterprise}/Groups/{scim_group_id}', 'put', 'enterprise-admin/set-information-for-provisioned-enterprise-group', 'Set SCIM information for a provisioned enterprise group', 'rest/reference/enterprise-admin#set-scim-information-for-a-provisioned-enterprise-group', [], [['schemas', list], ['displayName', str], ['members', list]], ''), ('/scim/v2/enterprises/{enterprise}/Groups/{scim_group_id}', 'patch', 'enterprise-admin/update-attribute-for-enterprise-group', 'Update an attribute for a SCIM enterprise group', 'rest/reference/enterprise-admin#update-an-attribute-for-a-scim-enterprise-group', [], [['schemas', list], ['Operations', list]], ''), ('/scim/v2/enterprises/{enterprise}/Groups/{scim_group_id}', 'delete', 'enterprise-admin/delete-scim-group-from-enterprise', 'Delete a SCIM group from an enterprise', 'rest/reference/enterprise-admin#delete-a-scim-group-from-an-enterprise', [], [], ''), ('/scim/v2/enterprises/{enterprise}/Users', 'get', 'enterprise-admin/list-provisioned-identities-enterprise', 'List SCIM provisioned identities for an enterprise', 
'rest/reference/enterprise-admin#list-scim-provisioned-identities-for-an-enterprise', ['startIndex', 'count'], [], ''), ('/scim/v2/enterprises/{enterprise}/Users', 'post', 'enterprise-admin/provision-and-invite-enterprise-user', 'Provision and invite a SCIM enterprise user', 'rest/reference/enterprise-admin#provision-and-invite-a-scim-enterprise-user', [], [['schemas', list], ['userName', str], ['name', dict], ['emails', list], ['groups', list]], ''), ('/scim/v2/enterprises/{enterprise}/Users/{scim_user_id}', 'get', 'enterprise-admin/get-provisioning-information-for-enterprise-user', 'Get SCIM provisioning information for an enterprise user', 'rest/reference/enterprise-admin#get-scim-provisioning-information-for-an-enterprise-user', [], [], ''), ('/scim/v2/enterprises/{enterprise}/Users/{scim_user_id}', 'put', 'enterprise-admin/set-information-for-provisioned-enterprise-user', 'Set SCIM information for a provisioned enterprise user', 'rest/reference/enterprise-admin#set-scim-information-for-a-provisioned-enterprise-user', [], [['schemas', list], ['userName', str], ['name', dict], ['emails', list], ['groups', list]], ''), ('/scim/v2/enterprises/{enterprise}/Users/{scim_user_id}', 'patch', 'enterprise-admin/update-attribute-for-enterprise-user', 'Update an attribute for a SCIM enterprise user', 'rest/reference/enterprise-admin#update-an-attribute-for-a-scim-enterprise-user', [], [['schemas', list], ['Operations', list]], ''), ('/scim/v2/enterprises/{enterprise}/Users/{scim_user_id}', 'delete', 'enterprise-admin/delete-user-from-enterprise', 'Delete a SCIM user from an enterprise', 'rest/reference/enterprise-admin#delete-a-scim-user-from-an-enterprise', [], [], ''), ('/scim/v2/organizations/{org}/Users', 'get', 'scim/list-provisioned-identities', 'List SCIM provisioned identities', 'v3/scim/#list-scim-provisioned-identities', ['startIndex', 'count', 'filter'], [], ''), ('/scim/v2/organizations/{org}/Users', 'post', 'scim/provision-and-invite-user', 'Provision and invite a SCIM user', 'v3/scim/#provision-and-invite-a-scim-user', [], [['userName', str], ['displayName', str], ['name', dict], ['emails', list], ['schemas', list], ['externalId', str], ['groups', list], ['active', bool]], ''), ('/scim/v2/organizations/{org}/Users/{scim_user_id}', 'get', 'scim/get-provisioning-information-for-user', 'Get SCIM provisioning information for a user', 'v3/scim/#get-scim-provisioning-information-for-a-user', [], [], ''), ('/scim/v2/organizations/{org}/Users/{scim_user_id}', 'put', 'scim/set-information-for-provisioned-user', 'Update a provisioned organization membership', 'v3/scim/#set-scim-information-for-a-provisioned-user', [], [['schemas', list], ['displayName', str], ['externalId', str], ['groups', list], ['active', bool], ['userName', str], ['name', dict], ['emails', list]], ''), ('/scim/v2/organizations/{org}/Users/{scim_user_id}', 'patch', 'scim/update-attribute-for-user', 'Update an attribute for a SCIM user', 'v3/scim/#update-an-attribute-for-a-scim-user', [], [['schemas', list], ['Operations', list]], ''), ('/scim/v2/organizations/{org}/Users/{scim_user_id}', 'delete', 'scim/delete-user-from-org', 'Delete a SCIM user from an organization', 'v3/scim/#delete-a-scim-user-from-an-organization', [], [], ''), ('/search/code', 'get', 'search/code', 'Search code', 'v3/search/#search-code', ['q', 'sort', 'order', 'per_page', 'page'], [], ''), ('/search/commits', 'get', 'search/commits', 'Search commits', 'v3/search/#search-commits', ['q', 'sort', 'order', 'per_page', 'page'], [], 'cloak'), 
('/search/issues', 'get', 'search/issues-and-pull-requests', 'Search issues and pull requests', 'v3/search/#search-issues-and-pull-requests', ['q', 'sort', 'order', 'per_page', 'page'], [], ''), ('/search/labels', 'get', 'search/labels', 'Search labels', 'v3/search/#search-labels', ['repository_id', 'q', 'sort', 'order'], [], ''), ('/search/repositories', 'get', 'search/repos', 'Search repositories', 'v3/search/#search-repositories', ['q', 'sort', 'order', 'per_page', 'page'], [], 'mercy'), ('/search/topics', 'get', 'search/topics', 'Search topics', 'v3/search/#search-topics', ['q'], [], 'mercy'), ('/search/users', 'get', 'search/users', 'Search users', 'v3/search/#search-users', ['q', 'sort', 'order', 'per_page', 'page'], [], ''), ('/teams/{team_id}', 'get', 'teams/get-legacy', 'Get a team (Legacy)', 'v3/teams/#get-a-team-legacy', [], [], ''), ('/teams/{team_id}', 'patch', 'teams/update-legacy', 'Update a team (Legacy)', 'v3/teams/#update-a-team-legacy', [], [['name', str], ['description', str], ['privacy', str], ['permission', str, 'pull'], ['parent_team_id', int]], ''), ('/teams/{team_id}', 'delete', 'teams/delete-legacy', 'Delete a team (Legacy)', 'v3/teams/#delete-a-team-legacy', [], [], ''), ('/teams/{team_id}/discussions', 'get', 'teams/list-discussions-legacy', 'List discussions (Legacy)', 'rest/reference/teams#list-discussions-legacy', ['direction', 'per_page', 'page'], [], 'squirrel-girl'), ('/teams/{team_id}/discussions', 'post', 'teams/create-discussion-legacy', 'Create a discussion (Legacy)', 'rest/reference/teams#create-a-discussion-legacy', [], [['title', str], ['body', str], ['private', bool, False]], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}', 'get', 'teams/get-discussion-legacy', 'Get a discussion (Legacy)', 'rest/reference/teams#get-a-discussion-legacy', [], [], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}', 'patch', 'teams/update-discussion-legacy', 'Update a discussion (Legacy)', 'rest/reference/teams#update-a-discussion-legacy', [], [['title', str], ['body', str]], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}', 'delete', 'teams/delete-discussion-legacy', 'Delete a discussion (Legacy)', 'rest/reference/teams#delete-a-discussion-legacy', [], [], ''), ('/teams/{team_id}/discussions/{discussion_number}/comments', 'get', 'teams/list-discussion-comments-legacy', 'List discussion comments (Legacy)', 'rest/reference/teams#list-discussion-comments-legacy', ['direction', 'per_page', 'page'], [], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/comments', 'post', 'teams/create-discussion-comment-legacy', 'Create a discussion comment (Legacy)', 'rest/reference/teams#create-a-discussion-comment-legacy', [], [['body', str]], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/comments/{comment_number}', 'get', 'teams/get-discussion-comment-legacy', 'Get a discussion comment (Legacy)', 'rest/reference/teams#get-a-discussion-comment-legacy', [], [], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/comments/{comment_number}', 'patch', 'teams/update-discussion-comment-legacy', 'Update a discussion comment (Legacy)', 'rest/reference/teams#update-a-discussion-comment-legacy', [], [['body', str]], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/comments/{comment_number}', 'delete', 'teams/delete-discussion-comment-legacy', 'Delete a discussion comment (Legacy)', 'rest/reference/teams#delete-a-discussion-comment-legacy', [], [], ''), 
('/teams/{team_id}/discussions/{discussion_number}/comments/{comment_number}/reactions', 'get', 'reactions/list-for-team-discussion-comment-legacy', 'List reactions for a team discussion comment (Legacy)', 'v3/reactions/#list-reactions-for-a-team-discussion-comment-legacy', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/comments/{comment_number}/reactions', 'post', 'reactions/create-for-team-discussion-comment-legacy', 'Create reaction for a team discussion comment (Legacy)', 'v3/reactions/#create-reaction-for-a-team-discussion-comment-legacy', [], [['content', str]], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/reactions', 'get', 'reactions/list-for-team-discussion-legacy', 'List reactions for a team discussion (Legacy)', 'v3/reactions/#list-reactions-for-a-team-discussion-legacy', ['content', 'per_page', 'page'], [], 'squirrel-girl'), ('/teams/{team_id}/discussions/{discussion_number}/reactions', 'post', 'reactions/create-for-team-discussion-legacy', 'Create reaction for a team discussion (Legacy)', 'v3/reactions/#create-reaction-for-a-team-discussion-legacy', [], [['content', str]], 'squirrel-girl'), ('/teams/{team_id}/invitations', 'get', 'teams/list-pending-invitations-legacy', 'List pending team invitations (Legacy)', 'rest/reference/teams#list-pending-team-invitations-legacy', ['per_page', 'page'], [], ''), ('/teams/{team_id}/members', 'get', 'teams/list-members-legacy', 'List team members (Legacy)', 'rest/reference/teams#list-team-members-legacy', ['role', 'per_page', 'page'], [], ''), ('/teams/{team_id}/members/{username}', 'get', 'teams/get-member-legacy', 'Get team member (Legacy)', 'rest/reference/teams#get-team-member-legacy', [], [], ''), ('/teams/{team_id}/members/{username}', 'put', 'teams/add-member-legacy', 'Add team member (Legacy)', 'rest/reference/teams#add-team-member-legacy', [], [], ''), ('/teams/{team_id}/members/{username}', 'delete', 'teams/remove-member-legacy', 'Remove team member (Legacy)', 'rest/reference/teams#remove-team-member-legacy', [], [], ''), ('/teams/{team_id}/memberships/{username}', 'get', 'teams/get-membership-for-user-legacy', 'Get team membership for a user (Legacy)', 'rest/reference/teams#get-team-membership-for-a-user-legacy', [], [], ''), ('/teams/{team_id}/memberships/{username}', 'put', 'teams/add-or-update-membership-for-user-legacy', 'Add or update team membership for a user (Legacy)', 'rest/reference/teams#add-or-update-team-membership-for-a-user-legacy', [], [['role', str, 'member']], ''), ('/teams/{team_id}/memberships/{username}', 'delete', 'teams/remove-membership-for-user-legacy', 'Remove team membership for a user (Legacy)', 'rest/reference/teams#remove-team-membership-for-a-user-legacy', [], [], ''), ('/teams/{team_id}/projects', 'get', 'teams/list-projects-legacy', 'List team projects (Legacy)', 'v3/teams/#list-team-projects-legacy', ['per_page', 'page'], [], 'inertia'), ('/teams/{team_id}/projects/{project_id}', 'get', 'teams/check-permissions-for-project-legacy', 'Check team permissions for a project (Legacy)', 'v3/teams/#check-team-permissions-for-a-project-legacy', [], [], 'inertia'), ('/teams/{team_id}/projects/{project_id}', 'put', 'teams/add-or-update-project-permissions-legacy', 'Add or update team project permissions (Legacy)', 'v3/teams/#add-or-update-team-project-permissions-legacy', [], [['permission', str]], 'inertia'), ('/teams/{team_id}/projects/{project_id}', 'delete', 'teams/remove-project-legacy', 'Remove a project from a team 
(Legacy)', 'v3/teams/#remove-a-project-from-a-team-legacy', [], [], ''), ('/teams/{team_id}/repos', 'get', 'teams/list-repos-legacy', 'List team repositories (Legacy)', 'v3/teams/#list-team-repositories-legacy', ['per_page', 'page'], [], ''), ('/teams/{team_id}/repos/{owner}/{repo}', 'get', 'teams/check-permissions-for-repo-legacy', 'Check team permissions for a repository (Legacy)', 'v3/teams/#check-team-permissions-for-a-repository-legacy', [], [], ''), ('/teams/{team_id}/repos/{owner}/{repo}', 'put', 'teams/add-or-update-repo-permissions-legacy', 'Add or update team repository permissions (Legacy)', 'v3/teams/#add-or-update-team-repository-permissions-legacy', [], [['permission', str]], ''), ('/teams/{team_id}/repos/{owner}/{repo}', 'delete', 'teams/remove-repo-legacy', 'Remove a repository from a team (Legacy)', 'v3/teams/#remove-a-repository-from-a-team-legacy', [], [], ''), ('/teams/{team_id}/team-sync/group-mappings', 'get', 'teams/list-idp-groups-for-legacy', 'List IdP groups for a team (Legacy)', 'rest/reference/teams#list-idp-groups-for-a-team-legacy', [], [], ''), ('/teams/{team_id}/team-sync/group-mappings', 'patch', 'teams/create-or-update-idp-group-connections-legacy', 'Create or update IdP group connections (Legacy)', 'rest/reference/teams#create-or-update-idp-group-connections-legacy', [], [['groups', list], ['synced_at', str]], ''), ('/teams/{team_id}/teams', 'get', 'teams/list-child-legacy', 'List child teams (Legacy)', 'v3/teams/#list-child-teams-legacy', ['per_page', 'page'], [], ''), ('/user', 'get', 'users/get-authenticated', 'Get the authenticated user', 'v3/users/#get-the-authenticated-user', [], [], ''), ('/user', 'patch', 'users/update-authenticated', 'Update the authenticated user', 'v3/users/#update-the-authenticated-user', [], [['name', str], ['email', str], ['blog', str], ['twitter_username', str], ['company', str], ['location', str], ['hireable', bool], ['bio', str]], ''), ('/user/blocks', 'get', 'users/list-blocked-by-authenticated', 'List users blocked by the authenticated user', 'rest/reference/users#list-users-blocked-by-the-authenticated-user', [], [], ''), ('/user/blocks/{username}', 'get', 'users/check-blocked', 'Check if a user is blocked by the authenticated user', 'rest/reference/users#check-if-a-user-is-blocked-by-the-authenticated-user', [], [], ''), ('/user/blocks/{username}', 'put', 'users/block', 'Block a user', 'rest/reference/users#block-a-user', [], [], ''), ('/user/blocks/{username}', 'delete', 'users/unblock', 'Unblock a user', 'rest/reference/users#unblock-a-user', [], [], ''), ('/user/email/visibility', 'patch', 'users/set-primary-email-visibility-for-authenticated', 'Set primary email visibility for the authenticated user', 'rest/reference/users#set-primary-email-visibility-for-the-authenticated-user', [], [['email', str], ['visibility', str]], ''), ('/user/emails', 'get', 'users/list-emails-for-authenticated', 'List email addresses for the authenticated user', 'rest/reference/users#list-email-addresses-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/emails', 'post', 'users/add-email-for-authenticated', 'Add an email address for the authenticated user', 'rest/reference/users#add-an-email-address-for-the-authenticated-user', [], [], ''), ('/user/emails', 'delete', 'users/delete-email-for-authenticated', 'Delete an email address for the authenticated user', 'rest/reference/users#delete-an-email-address-for-the-authenticated-user', [], [], ''), ('/user/followers', 'get', 'users/list-followers-for-authenticated-user', 
'List followers of the authenticated user', 'rest/reference/users#list-followers-of-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/following', 'get', 'users/list-followed-by-authenticated', 'List the people the authenticated user follows', 'rest/reference/users#list-the-people-the-authenticated-user-follows', ['per_page', 'page'], [], ''), ('/user/following/{username}', 'get', 'users/check-person-is-followed-by-authenticated', 'Check if a person is followed by the authenticated user', 'rest/reference/users#check-if-a-person-is-followed-by-the-authenticated-user', [], [], ''), ('/user/following/{username}', 'put', 'users/follow', 'Follow a user', 'rest/reference/users#follow-a-user', [], [], ''), ('/user/following/{username}', 'delete', 'users/unfollow', 'Unfollow a user', 'rest/reference/users#unfollow-a-user', [], [], ''), ('/user/gpg_keys', 'get', 'users/list-gpg-keys-for-authenticated', 'List GPG keys for the authenticated user', 'rest/reference/users#list-gpg-keys-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/gpg_keys', 'post', 'users/create-gpg-key-for-authenticated', 'Create a GPG key for the authenticated user', 'rest/reference/users#create-a-gpg-key-for-the-authenticated-user', [], [['armored_public_key', str]], ''), ('/user/gpg_keys/{gpg_key_id}', 'get', 'users/get-gpg-key-for-authenticated', 'Get a GPG key for the authenticated user', 'rest/reference/users#get-a-gpg-key-for-the-authenticated-user', [], [], ''), ('/user/gpg_keys/{gpg_key_id}', 'delete', 'users/delete-gpg-key-for-authenticated', 'Delete a GPG key for the authenticated user', 'rest/reference/users#delete-a-gpg-key-for-the-authenticated-user', [], [], ''), ('/user/installations', 'get', 'apps/list-installations-for-authenticated-user', 'List app installations accessible to the user access token', 'rest/reference/apps#list-app-installations-accessible-to-the-user-access-token', ['per_page', 'page'], [], ''), ('/user/installations/{installation_id}/repositories', 'get', 'apps/list-installation-repos-for-authenticated-user', 'List repositories accessible to the user access token', 'rest/reference/apps#list-repositories-accessible-to-the-user-access-token', ['per_page', 'page'], [], 'mercy'), ('/user/installations/{installation_id}/repositories/{repository_id}', 'put', 'apps/add-repo-to-installation', 'Add a repository to an app installation', 'rest/reference/apps#add-a-repository-to-an-app-installation', [], [], ''), ('/user/installations/{installation_id}/repositories/{repository_id}', 'delete', 'apps/remove-repo-from-installation', 'Remove a repository from an app installation', 'rest/reference/apps#remove-a-repository-from-an-app-installation', [], [], ''), ('/user/interaction-limits', 'get', 'interactions/get-restrictions-for-your-public-repos', 'Get interaction restrictions for your public repositories', 'rest/reference/interactions#get-interaction-restrictions-for-your-public-repositories', [], [], ''), ('/user/interaction-limits', 'put', 'interactions/set-restrictions-for-your-public-repos', 'Set interaction restrictions for your public repositories', 'rest/reference/interactions#set-interaction-restrictions-for-your-public-repositories', [], [['limit', str], ['expiry', str]], ''), ('/user/interaction-limits', 'delete', 'interactions/remove-restrictions-for-your-public-repos', 'Remove interaction restrictions from your public repositories', 'rest/reference/interactions#remove-interaction-restrictions-from-your-public-repositories', [], [], ''), ('/user/issues', 'get', 
'issues/list-for-authenticated-user', 'List user account issues assigned to the authenticated user', 'v3/issues/#list-user-account-issues-assigned-to-the-authenticated-user', ['filter', 'state', 'labels', 'sort', 'direction', 'since', 'per_page', 'page'], [], 'squirrel-girl'), ('/user/keys', 'get', 'users/list-public-ssh-keys-for-authenticated', 'List public SSH keys for the authenticated user', 'rest/reference/users#list-public-ssh-keys-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/keys', 'post', 'users/create-public-ssh-key-for-authenticated', 'Create a public SSH key for the authenticated user', 'rest/reference/users#create-a-public-ssh-key-for-the-authenticated-user', [], [['title', str], ['key', str]], ''), ('/user/keys/{key_id}', 'get', 'users/get-public-ssh-key-for-authenticated', 'Get a public SSH key for the authenticated user', 'rest/reference/users#get-a-public-ssh-key-for-the-authenticated-user', [], [], ''), ('/user/keys/{key_id}', 'delete', 'users/delete-public-ssh-key-for-authenticated', 'Delete a public SSH key for the authenticated user', 'rest/reference/users#delete-a-public-ssh-key-for-the-authenticated-user', [], [], ''), ('/user/marketplace_purchases', 'get', 'apps/list-subscriptions-for-authenticated-user', 'List subscriptions for the authenticated user', 'rest/reference/apps#list-subscriptions-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/marketplace_purchases/stubbed', 'get', 'apps/list-subscriptions-for-authenticated-user-stubbed', 'List subscriptions for the authenticated user (stubbed)', 'rest/reference/apps#list-subscriptions-for-the-authenticated-user-stubbed', ['per_page', 'page'], [], ''), ('/user/memberships/orgs', 'get', 'orgs/list-memberships-for-authenticated-user', 'List organization memberships for the authenticated user', 'rest/reference/orgs#list-organization-memberships-for-the-authenticated-user', ['state', 'per_page', 'page'], [], ''), ('/user/memberships/orgs/{org}', 'get', 'orgs/get-membership-for-authenticated-user', 'Get an organization membership for the authenticated user', 'rest/reference/orgs#get-an-organization-membership-for-the-authenticated-user', [], [], ''), ('/user/memberships/orgs/{org}', 'patch', 'orgs/update-membership-for-authenticated-user', 'Update an organization membership for the authenticated user', 'rest/reference/orgs#update-an-organization-membership-for-the-authenticated-user', [], [['state', str]], ''), ('/user/migrations', 'get', 'migrations/list-for-authenticated-user', 'List user migrations', 'rest/reference/migrations#list-user-migrations', ['per_page', 'page'], [], 'wyandotte'), ('/user/migrations', 'post', 'migrations/start-for-authenticated-user', 'Start a user migration', 'rest/reference/migrations#start-a-user-migration', [], [['lock_repositories', bool], ['exclude_attachments', bool], ['exclude', list], ['repositories', list]], ''), ('/user/migrations/{migration_id}', 'get', 'migrations/get-status-for-authenticated-user', 'Get a user migration status', 'rest/reference/migrations#get-a-user-migration-status', ['exclude'], [], 'wyandotte'), ('/user/migrations/{migration_id}/archive', 'get', 'migrations/get-archive-for-authenticated-user', 'Download a user migration archive', 'rest/reference/migrations#download-a-user-migration-archive', [], [], 'wyandotte'), ('/user/migrations/{migration_id}/archive', 'delete', 'migrations/delete-archive-for-authenticated-user', 'Delete a user migration archive', 'rest/reference/migrations#delete-a-user-migration-archive', [], [], 
'wyandotte'), ('/user/migrations/{migration_id}/repos/{repo_name}/lock', 'delete', 'migrations/unlock-repo-for-authenticated-user', 'Unlock a user repository', 'rest/reference/migrations#unlock-a-user-repository', [], [], 'wyandotte'), ('/user/migrations/{migration_id}/repositories', 'get', 'migrations/list-repos-for-user', 'List repositories for a user migration', 'rest/reference/migrations#list-repositories-for-a-user-migration', ['per_page', 'page'], [], 'wyandotte'), ('/user/orgs', 'get', 'orgs/list-for-authenticated-user', 'List organizations for the authenticated user', 'v3/orgs/#list-organizations-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/projects', 'post', 'projects/create-for-authenticated-user', 'Create a user project', 'v3/projects/#create-a-user-project', [], [['name', str], ['body', str]], 'inertia'), ('/user/public_emails', 'get', 'users/list-public-emails-for-authenticated', 'List public email addresses for the authenticated user', 'rest/reference/users#list-public-email-addresses-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/repos', 'get', 'repos/list-for-authenticated-user', 'List repositories for the authenticated user', 'v3/repos/#list-repositories-for-the-authenticated-user', ['visibility', 'affiliation', 'type', 'sort', 'direction', 'per_page', 'page', 'since', 'before'], [], ''), ('/user/repos', 'post', 'repos/create-for-authenticated-user', 'Create a repository for the authenticated user', 'v3/repos/#create-a-repository-for-the-authenticated-user', [], [['name', str], ['description', str], ['homepage', str], ['private', bool, False], ['has_issues', bool, True], ['has_projects', bool, True], ['has_wiki', bool, True], ['team_id', int], ['auto_init', bool, False], ['gitignore_template', str], ['license_template', str], ['allow_squash_merge', bool, True], ['allow_merge_commit', bool, True], ['allow_rebase_merge', bool, True], ['delete_branch_on_merge', bool, False], ['has_downloads', bool, True], ['is_template', bool, False]], 'nebula'), ('/user/repository_invitations', 'get', 'repos/list-invitations-for-authenticated-user', 'List repository invitations for the authenticated user', 'rest/reference/repos#list-repository-invitations-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/repository_invitations/{invitation_id}', 'patch', 'repos/accept-invitation', 'Accept a repository invitation', 'rest/reference/repos#accept-a-repository-invitation', [], [], ''), ('/user/repository_invitations/{invitation_id}', 'delete', 'repos/decline-invitation', 'Decline a repository invitation', 'rest/reference/repos#decline-a-repository-invitation', [], [], ''), ('/user/starred', 'get', 'activity/list-repos-starred-by-authenticated-user', 'List repositories starred by the authenticated user', 'rest/reference/activity#list-repositories-starred-by-the-authenticated-user', ['sort', 'direction', 'per_page', 'page'], [], ''), ('/user/starred/{owner}/{repo}', 'get', 'activity/check-repo-is-starred-by-authenticated-user', 'Check if a repository is starred by the authenticated user', 'rest/reference/activity#check-if-a-repository-is-starred-by-the-authenticated-user', [], [], ''), ('/user/starred/{owner}/{repo}', 'put', 'activity/star-repo-for-authenticated-user', 'Star a repository for the authenticated user', 'rest/reference/activity#star-a-repository-for-the-authenticated-user', [], [], ''), ('/user/starred/{owner}/{repo}', 'delete', 'activity/unstar-repo-for-authenticated-user', 'Unstar a repository for the authenticated user', 
'rest/reference/activity#unstar-a-repository-for-the-authenticated-user', [], [], ''), ('/user/subscriptions', 'get', 'activity/list-watched-repos-for-authenticated-user', 'List repositories watched by the authenticated user', 'rest/reference/activity#list-repositories-watched-by-the-authenticated-user', ['per_page', 'page'], [], ''), ('/user/teams', 'get', 'teams/list-for-authenticated-user', 'List teams for the authenticated user', 'v3/teams/#list-teams-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/users', 'get', 'users/list', 'List users', 'v3/users/#list-users', ['since', 'per_page'], [], ''), ('/users/{username}', 'get', 'users/get-by-username', 'Get a user', 'v3/users/#get-a-user', [], [], ''), ('/users/{username}/events', 'get', 'activity/list-events-for-authenticated-user', 'List events for the authenticated user', 'rest/reference/activity#list-events-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/users/{username}/events/orgs/{org}', 'get', 'activity/list-org-events-for-authenticated-user', 'List organization events for the authenticated user', 'rest/reference/activity#list-organization-events-for-the-authenticated-user', ['per_page', 'page'], [], ''), ('/users/{username}/events/public', 'get', 'activity/list-public-events-for-user', 'List public events for a user', 'rest/reference/activity#list-public-events-for-a-user', ['per_page', 'page'], [], ''), ('/users/{username}/followers', 'get', 'users/list-followers-for-user', 'List followers of a user', 'rest/reference/users#list-followers-of-a-user', ['per_page', 'page'], [], ''), ('/users/{username}/following', 'get', 'users/list-following-for-user', 'List the people a user follows', 'rest/reference/users#list-the-people-a-user-follows', ['per_page', 'page'], [], ''), ('/users/{username}/following/{target_user}', 'get', 'users/check-following-for-user', 'Check if a user follows another user', 'rest/reference/users#check-if-a-user-follows-another-user', [], [], ''), ('/users/{username}/gists', 'get', 'gists/list-for-user', 'List gists for a user', 'v3/gists/#list-gists-for-a-user', ['since', 'per_page', 'page'], [], ''), ('/users/{username}/gpg_keys', 'get', 'users/list-gpg-keys-for-user', 'List GPG keys for a user', 'rest/reference/users#list-gpg-keys-for-a-user', ['per_page', 'page'], [], ''), ('/users/{username}/hovercard', 'get', 'users/get-context-for-user', 'Get contextual information for a user', 'v3/users/#get-contextual-information-for-a-user', ['subject_type', 'subject_id'], [], ''), ('/users/{username}/installation', 'get', 'apps/get-user-installation', 'Get a user installation for the authenticated app', 'v3/apps/#get-a-user-installation-for-the-authenticated-app', [], [], ''), ('/users/{username}/keys', 'get', 'users/list-public-keys-for-user', 'List public keys for a user', 'rest/reference/users#list-public-keys-for-a-user', ['per_page', 'page'], [], ''), ('/users/{username}/orgs', 'get', 'orgs/list-for-user', 'List organizations for a user', 'v3/orgs/#list-organizations-for-a-user', ['per_page', 'page'], [], ''), ('/users/{username}/projects', 'get', 'projects/list-for-user', 'List user projects', 'v3/projects/#list-user-projects', ['state', 'per_page', 'page'], [], 'inertia'), ('/users/{username}/received_events', 'get', 'activity/list-received-events-for-user', 'List events received by the authenticated user', 'rest/reference/activity#list-events-received-by-the-authenticated-user', ['per_page', 'page'], [], ''), ('/users/{username}/received_events/public', 'get', 
'activity/list-received-public-events-for-user', 'List public events received by a user', 'rest/reference/activity#list-public-events-received-by-a-user', ['per_page', 'page'], [], ''), ('/users/{username}/repos', 'get', 'repos/list-for-user', 'List repositories for a user', 'v3/repos/#list-repositories-for-a-user', ['type', 'sort', 'direction', 'per_page', 'page'], [], 'nebula'), ('/users/{username}/settings/billing/actions', 'get', 'billing/get-github-actions-billing-user', 'Get GitHub Actions billing for a user', 'v3/billing/#get-github-actions-billing-for-a-user', [], [], ''), ('/users/{username}/settings/billing/packages', 'get', 'billing/get-github-packages-billing-user', 'Get GitHub Packages billing for a user', 'v3/billing/#get-github-packages-billing-for-a-user', [], [], ''), ('/users/{username}/settings/billing/shared-storage', 'get', 'billing/get-shared-storage-billing-user', 'Get shared storage billing for a user', 'v3/billing/#get-shared-storage-billing-for-a-user', [], [], ''), ('/users/{username}/starred', 'get', 'activity/list-repos-starred-by-user', 'List repositories starred by a user', 'rest/reference/activity#list-repositories-starred-by-a-user', ['sort', 'direction', 'per_page', 'page'], [], ''), ('/users/{username}/subscriptions', 'get', 'activity/list-repos-watched-by-user', 'List repositories watched by a user', 'rest/reference/activity#list-repositories-watched-by-a-user', ['per_page', 'page'], [], '')]
172.81808
358
0.69049
18335dd61bb9a5752f51922d9869c7f4398e3b5a
1835
py
Python
tools/mo/unit_tests/mo/front/mxnet/activation_test.py
ryanloney/openvino-1
4e0a740eb3ee31062ba0df88fcf438564f67edb7
[ "Apache-2.0" ]
1127
2018-10-15T14:36:58.000Z
2020-04-20T09:29:44.000Z
tools/mo/unit_tests/mo/front/mxnet/activation_test.py
ryanloney/openvino-1
4e0a740eb3ee31062ba0df88fcf438564f67edb7
[ "Apache-2.0" ]
439
2018-10-20T04:40:35.000Z
2020-04-19T05:56:25.000Z
tools/mo/unit_tests/mo/front/mxnet/activation_test.py
ryanloney/openvino-1
4e0a740eb3ee31062ba0df88fcf438564f67edb7
[ "Apache-2.0" ]
414
2018-10-17T05:53:46.000Z
2020-04-16T17:29:53.000Z
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import unittest from openvino.tools.mo.front.mxnet.activation import ActivationFrontExtractor from openvino.tools.mo.graph.graph import Node from unit_tests.utils.graph import build_graph class TestActivationFrontExtractorOp(unittest.TestCase): def test_extract_sigmoid_layer(self): graph = build_graph( {'node_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, 'act_node': {'type': 'Activation', 'kind': 'op', 'op': 'Activation', }, 'node_2': {'type': 'Identity', 'kind': 'op'}, }, [ ('node_1', 'act_node'), ('act_node', 'node_2'), ], { 'act_node': {'symbol_dict': {'attrs': {'act_type': 'sigmoid'}}}, }) act_node = Node(graph, 'act_node') act_extr_op = ActivationFrontExtractor() supported = act_extr_op.extract(act_node) self.assertTrue(supported) self.assertEqual(act_node['op'], 'Sigmoid') def test_extract_relu_layer(self): graph = build_graph( {'node_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, 'act_node': {'type': 'relu', 'kind': 'op', 'op': 'Activation', }, 'node_2': {'type': 'Identity', 'kind': 'op'}, }, [ ('node_1', 'act_node'), ('act_node', 'node_2'), ], { 'act_node': {'symbol_dict': {'attrs': {'act_type': 'relu'}}}, }) act_node = Node(graph, 'act_node') act_extr_op = ActivationFrontExtractor() supported = act_extr_op.extract(act_node) self.assertTrue(supported) self.assertEqual(act_node['op'], 'ReLU')
35.980392
84
0.541689
e2c037701a0ff802a442907237857e6fc38da855
24760
py
Python
tests/conftest.py
sdebruyn/hass-auth-synology
c37f2071d666140a17c4b8e4ec4dd226fafbc31f
[ "Apache-2.0" ]
3
2021-11-13T00:56:01.000Z
2021-12-22T06:32:18.000Z
tests/conftest.py
sdebruyn/hass-auth-synology
c37f2071d666140a17c4b8e4ec4dd226fafbc31f
[ "Apache-2.0" ]
34
2021-11-16T17:27:27.000Z
2022-03-31T17:31:41.000Z
tests/conftest.py
sdebruyn/hass-auth-synology
c37f2071d666140a17c4b8e4ec4dd226fafbc31f
[ "Apache-2.0" ]
null
null
null
"""Set up some common test helper things.""" import asyncio import datetime import functools import logging import socket import ssl import threading from unittest.mock import AsyncMock, MagicMock, Mock, patch import multidict import pytest import pytest_socket import requests_mock as _requests_mock from aiohttp.test_utils import make_mocked_request from homeassistant import core as ha from homeassistant import loader, runner, util from homeassistant.auth.const import GROUP_ID_ADMIN, GROUP_ID_READ_ONLY from homeassistant.auth.models import Credentials from homeassistant.auth.providers import homeassistant, legacy_api_password from homeassistant.components import mqtt, recorder from homeassistant.components.websocket_api.auth import ( TYPE_AUTH, TYPE_AUTH_OK, TYPE_AUTH_REQUIRED, ) from homeassistant.components.websocket_api.http import URL from homeassistant.const import ATTR_NOW, EVENT_TIME_CHANGED, HASSIO_USER_NAME from homeassistant.helpers import config_entry_oauth2_flow, event from homeassistant.setup import async_setup_component from homeassistant.util import location from tests.ignore_uncaught_exceptions import IGNORE_UNCAUGHT_EXCEPTIONS pytest.register_assert_rewrite("tests.common") from tests.common import ( # noqa: E402, isort:skip CLIENT_ID, INSTANCES, MockConfigEntry, MockUser, async_fire_mqtt_message, async_test_home_assistant, get_test_home_assistant, init_recorder_component, mock_storage as mock_storage, ) from tests.test_util.aiohttp import mock_aiohttp_client # noqa: E402, isort:skip logging.basicConfig(level=logging.DEBUG) logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False)) # Disable fixtures overriding our beautiful policy asyncio.set_event_loop_policy = lambda policy: None def pytest_configure(config): """Register marker for tests that log exceptions.""" config.addinivalue_line( "markers", "no_fail_on_log_exception: mark test to not fail on logged exception" ) def pytest_runtest_setup(): """Prepare pytest_socket and freezegun. pytest_socket: Throw if tests attempt to open sockets. allow_unix_socket is set to True because it's needed by asyncio. Important: socket_allow_hosts must be called before disable_socket, otherwise all destinations will be allowed. freezegun: Modified to include https://github.com/spulec/freezegun/pull/424 """ pytest_socket.socket_allow_hosts(["127.0.0.1"]) disable_socket(allow_unix_socket=True) @pytest.fixture def socket_disabled(pytestconfig): """Disable socket.socket for duration of this test function. This incorporates changes from https://github.com/miketheman/pytest-socket/pull/76 and hardcodes allow_unix_socket to True because it's not passed on the command line. """ socket_was_enabled = socket.socket == pytest_socket._true_socket disable_socket(allow_unix_socket=True) yield if socket_was_enabled: pytest_socket.enable_socket() @pytest.fixture def socket_enabled(pytestconfig): """Enable socket.socket for duration of this test function. This incorporates changes from https://github.com/miketheman/pytest-socket/pull/76 and hardcodes allow_unix_socket to True because it's not passed on the command line. """ socket_was_disabled = socket.socket != pytest_socket._true_socket pytest_socket.enable_socket() yield if socket_was_disabled: disable_socket(allow_unix_socket=True) def disable_socket(allow_unix_socket=False): """Disable socket.socket to disable the Internet. useful in testing. 
This incorporates changes from https://github.com/miketheman/pytest-socket/pull/75 """ class GuardedSocket(socket.socket): """socket guard to disable socket creation (from pytest-socket).""" def __new__(cls, *args, **kwargs): try: if len(args) > 0: is_unix_socket = args[0] == socket.AF_UNIX else: is_unix_socket = kwargs.get("family") == socket.AF_UNIX except AttributeError: # AF_UNIX not supported on Windows https://bugs.python.org/issue33408 is_unix_socket = False if is_unix_socket and allow_unix_socket: return super().__new__(cls, *args, **kwargs) raise pytest_socket.SocketBlockedError() socket.socket = GuardedSocket def check_real(func): """Force a function to require a keyword _test_real to be passed in.""" @functools.wraps(func) async def guard_func(*args, **kwargs): real = kwargs.pop("_test_real", None) if not real: raise Exception( 'Forgot to mock or pass "_test_real=True" to %s', func.__name__ ) return await func(*args, **kwargs) return guard_func # Guard a few functions that would make network connections location.async_detect_location_info = check_real(location.async_detect_location_info) util.get_local_ip = lambda: "127.0.0.1" @pytest.fixture(autouse=True) def verify_cleanup(): """Verify that the test has cleaned up resources correctly.""" threads_before = frozenset(threading.enumerate()) yield if len(INSTANCES) >= 2: count = len(INSTANCES) for inst in INSTANCES: inst.stop() pytest.exit(f"Detected non stopped instances ({count}), aborting test run") threads = frozenset(threading.enumerate()) - threads_before assert not threads @pytest.fixture(autouse=True) def bcrypt_cost(): """Run with reduced rounds during tests, to speed up uses.""" import bcrypt gensalt_orig = bcrypt.gensalt def gensalt_mock(rounds=12, prefix=b"2b"): return gensalt_orig(4, prefix) bcrypt.gensalt = gensalt_mock yield bcrypt.gensalt = gensalt_orig @pytest.fixture def hass_storage(): """Fixture to mock storage.""" with mock_storage() as stored_data: yield stored_data @pytest.fixture def load_registries(): """Fixture to control the loading of registries when setting up the hass fixture. To avoid loading the registries, tests can be marked with: @pytest.mark.parametrize("load_registries", [False]) """ return True @pytest.fixture def hass(loop, load_registries, hass_storage, request): """Fixture to provide a test instance of Home Assistant.""" def exc_handle(loop, context): """Handle exceptions by rethrowing them, which will fail the test.""" # Most of these contexts will contain an exception, but not all. 
# The docs note the key as "optional" # See https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.call_exception_handler if "exception" in context: exceptions.append(context["exception"]) else: exceptions.append( Exception( "Received exception handler without exception, but with message: %s" % context["message"] ) ) orig_exception_handler(loop, context) exceptions = [] hass = loop.run_until_complete(async_test_home_assistant(loop, load_registries)) orig_exception_handler = loop.get_exception_handler() loop.set_exception_handler(exc_handle) yield hass loop.run_until_complete(hass.async_stop(force=True)) for ex in exceptions: if ( request.module.__name__, request.function.__name__, ) in IGNORE_UNCAUGHT_EXCEPTIONS: continue raise ex @pytest.fixture async def stop_hass(): """Make sure all hass are stopped.""" orig_hass = ha.HomeAssistant created = [] def mock_hass(): hass_inst = orig_hass() created.append(hass_inst) return hass_inst with patch("homeassistant.core.HomeAssistant", mock_hass): yield for hass_inst in created: if hass_inst.state == ha.CoreState.stopped: continue with patch.object(hass_inst.loop, "stop"): await hass_inst.async_block_till_done() await hass_inst.async_stop(force=True) @pytest.fixture def requests_mock(): """Fixture to provide a requests mocker.""" with _requests_mock.mock() as m: yield m @pytest.fixture def aioclient_mock(): """Fixture to mock aioclient calls.""" with mock_aiohttp_client() as mock_session: yield mock_session @pytest.fixture def mock_device_tracker_conf(): """Prevent device tracker from reading/writing data.""" devices = [] async def mock_update_config(path, id, entity): devices.append(entity) with patch( "homeassistant.components.device_tracker.legacy" ".DeviceTracker.async_update_config", side_effect=mock_update_config, ), patch( "homeassistant.components.device_tracker.legacy.async_load_config", side_effect=lambda *args: devices, ): yield devices @pytest.fixture async def hass_admin_credential(hass, local_auth): """Provide credentials for admin user.""" return Credentials( id="mock-credential-id", auth_provider_type="homeassistant", auth_provider_id=None, data={"username": "admin"}, is_new=False, ) @pytest.fixture async def hass_access_token(hass, hass_admin_user, hass_admin_credential): """Return an access token to access Home Assistant.""" await hass.auth.async_link_user(hass_admin_user, hass_admin_credential) refresh_token = await hass.auth.async_create_refresh_token( hass_admin_user, CLIENT_ID, credential=hass_admin_credential ) return hass.auth.async_create_access_token(refresh_token) @pytest.fixture def hass_owner_user(hass, local_auth): """Return a Home Assistant admin user.""" return MockUser(is_owner=True).add_to_hass(hass) @pytest.fixture def hass_admin_user(hass, local_auth): """Return a Home Assistant admin user.""" admin_group = hass.loop.run_until_complete( hass.auth.async_get_group(GROUP_ID_ADMIN) ) return MockUser(groups=[admin_group]).add_to_hass(hass) @pytest.fixture def hass_read_only_user(hass, local_auth): """Return a Home Assistant read only user.""" read_only_group = hass.loop.run_until_complete( hass.auth.async_get_group(GROUP_ID_READ_ONLY) ) return MockUser(groups=[read_only_group]).add_to_hass(hass) @pytest.fixture def hass_read_only_access_token(hass, hass_read_only_user, local_auth): """Return a Home Assistant read only user.""" credential = Credentials( id="mock-readonly-credential-id", auth_provider_type="homeassistant", auth_provider_id=None, data={"username": "readonly"}, is_new=False, ) 
hass_read_only_user.credentials.append(credential) refresh_token = hass.loop.run_until_complete( hass.auth.async_create_refresh_token( hass_read_only_user, CLIENT_ID, credential=credential ) ) return hass.auth.async_create_access_token(refresh_token) @pytest.fixture def hass_supervisor_user(hass, local_auth): """Return the Home Assistant Supervisor user.""" admin_group = hass.loop.run_until_complete( hass.auth.async_get_group(GROUP_ID_ADMIN) ) return MockUser( name=HASSIO_USER_NAME, groups=[admin_group], system_generated=True ).add_to_hass(hass) @pytest.fixture def hass_supervisor_access_token(hass, hass_supervisor_user, local_auth): """Return a Home Assistant Supervisor access token.""" refresh_token = hass.loop.run_until_complete( hass.auth.async_create_refresh_token(hass_supervisor_user) ) return hass.auth.async_create_access_token(refresh_token) @pytest.fixture def legacy_auth(hass): """Load legacy API password provider.""" prv = legacy_api_password.LegacyApiPasswordAuthProvider( hass, hass.auth._store, {"type": "legacy_api_password", "api_password": "test-password"}, ) hass.auth._providers[(prv.type, prv.id)] = prv return prv @pytest.fixture def local_auth(hass): """Load local auth provider.""" prv = homeassistant.HassAuthProvider( hass, hass.auth._store, {"type": "homeassistant"} ) hass.loop.run_until_complete(prv.async_initialize()) hass.auth._providers[(prv.type, prv.id)] = prv return prv @pytest.fixture def hass_client(hass, aiohttp_client, hass_access_token, socket_enabled): """Return an authenticated HTTP client.""" async def auth_client(): """Return an authenticated client.""" return await aiohttp_client( hass.http.app, headers={"Authorization": f"Bearer {hass_access_token}"} ) return auth_client @pytest.fixture def hass_client_no_auth(hass, aiohttp_client, socket_enabled): """Return an unauthenticated HTTP client.""" async def client(): """Return an authenticated client.""" return await aiohttp_client(hass.http.app) return client @pytest.fixture def current_request(): """Mock current request.""" with patch("homeassistant.components.http.current_request") as mock_request_context: mocked_request = make_mocked_request( "GET", "/some/request", headers={"Host": "example.com"}, sslcontext=ssl.SSLContext(ssl.PROTOCOL_TLS), ) mock_request_context.get.return_value = mocked_request yield mock_request_context @pytest.fixture def current_request_with_host(current_request): """Mock current request with a host header.""" new_headers = multidict.CIMultiDict(current_request.get.return_value.headers) new_headers[config_entry_oauth2_flow.HEADER_FRONTEND_BASE] = "https://example.com" current_request.get.return_value = current_request.get.return_value.clone( headers=new_headers ) @pytest.fixture def hass_ws_client(aiohttp_client, hass_access_token, hass, socket_enabled): """Websocket client fixture connected to websocket server.""" async def create_client(hass=hass, access_token=hass_access_token): """Create a websocket client.""" assert await async_setup_component(hass, "websocket_api", {}) client = await aiohttp_client(hass.http.app) websocket = await client.ws_connect(URL) auth_resp = await websocket.receive_json() assert auth_resp["type"] == TYPE_AUTH_REQUIRED if access_token is None: await websocket.send_json({"type": TYPE_AUTH, "access_token": "incorrect"}) else: await websocket.send_json({"type": TYPE_AUTH, "access_token": access_token}) auth_ok = await websocket.receive_json() assert auth_ok["type"] == TYPE_AUTH_OK # wrap in client websocket.client = client return websocket return 
create_client @pytest.fixture(autouse=True) def fail_on_log_exception(request, monkeypatch): """Fixture to fail if a callback wrapped by catch_log_exception or coroutine wrapped by async_create_catching_coro throws.""" if "no_fail_on_log_exception" in request.keywords: return def log_exception(format_err, *args): raise monkeypatch.setattr("homeassistant.util.logging.log_exception", log_exception) @pytest.fixture def mqtt_config(): """Fixture to allow overriding MQTT config.""" return None @pytest.fixture def mqtt_client_mock(hass): """Fixture to mock MQTT client.""" mid = 0 def get_mid(): nonlocal mid mid += 1 return mid class FakeInfo: def __init__(self, mid): self.mid = mid self.rc = 0 with patch("paho.mqtt.client.Client") as mock_client: @ha.callback def _async_fire_mqtt_message(topic, payload, qos, retain): async_fire_mqtt_message(hass, topic, payload, qos, retain) mid = get_mid() mock_client.on_publish(0, 0, mid) return FakeInfo(mid) def _subscribe(topic, qos=0): mid = get_mid() mock_client.on_subscribe(0, 0, mid) return (0, mid) def _unsubscribe(topic): mid = get_mid() mock_client.on_unsubscribe(0, 0, mid) return (0, mid) mock_client = mock_client.return_value mock_client.connect.return_value = 0 mock_client.subscribe.side_effect = _subscribe mock_client.unsubscribe.side_effect = _unsubscribe mock_client.publish.side_effect = _async_fire_mqtt_message yield mock_client @pytest.fixture async def mqtt_mock(hass, mqtt_client_mock, mqtt_config): """Fixture to mock MQTT component.""" if mqtt_config is None: mqtt_config = {mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_BIRTH_MESSAGE: {}} await hass.async_block_till_done() entry = MockConfigEntry( data=mqtt_config, domain=mqtt.DOMAIN, title="Tasmota", ) entry.add_to_hass(hass) assert await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() mqtt_component_mock = MagicMock( return_value=hass.data["mqtt"], spec_set=hass.data["mqtt"], wraps=hass.data["mqtt"], ) mqtt_component_mock.conf = hass.data["mqtt"].conf # For diagnostics mqtt_component_mock._mqttc = mqtt_client_mock hass.data["mqtt"] = mqtt_component_mock component = hass.data["mqtt"] component.reset_mock() return component @pytest.fixture(autouse=True) def mock_get_source_ip(): """Mock network util's async_get_source_ip.""" with patch( "homeassistant.components.network.util.async_get_source_ip", return_value="10.10.10.10", ): yield @pytest.fixture def mock_zeroconf(): """Mock zeroconf.""" with patch("homeassistant.components.zeroconf.HaZeroconf", autospec=True), patch( "homeassistant.components.zeroconf.HaAsyncServiceBrowser", autospec=True ): yield @pytest.fixture def mock_async_zeroconf(mock_zeroconf): """Mock AsyncZeroconf.""" with patch("homeassistant.components.zeroconf.HaAsyncZeroconf") as mock_aiozc: zc = mock_aiozc.return_value zc.async_unregister_service = AsyncMock() zc.async_register_service = AsyncMock() zc.async_update_service = AsyncMock() zc.zeroconf.async_wait_for_start = AsyncMock() zc.zeroconf.done = False zc.async_close = AsyncMock() zc.ha_async_close = AsyncMock() yield zc @pytest.fixture def legacy_patchable_time(): """Allow time to be patchable by using event listeners instead of asyncio loop.""" @ha.callback @loader.bind_hass def async_track_point_in_utc_time(hass, action, point_in_time): """Add a listener that fires once after a specific point in UTC time.""" # Ensure point_in_time is UTC point_in_time = event.dt_util.as_utc(point_in_time) # Since this is called once, we accept a HassJob so we can avoid # having to figure out how to 
call the action every time its called. job = action if isinstance(action, ha.HassJob) else ha.HassJob(action) @ha.callback def point_in_time_listener(event): """Listen for matching time_changed events.""" now = event.data[ATTR_NOW] if now < point_in_time or hasattr(point_in_time_listener, "run"): return # Set variable so that we will never run twice. # Because the event bus might have to wait till a thread comes # available to execute this listener it might occur that the # listener gets lined up twice to be executed. This will make # sure the second time it does nothing. setattr(point_in_time_listener, "run", True) async_unsub() hass.async_run_hass_job(job, now) async_unsub = hass.bus.async_listen(EVENT_TIME_CHANGED, point_in_time_listener) return async_unsub @ha.callback @loader.bind_hass def async_track_utc_time_change( hass, action, hour=None, minute=None, second=None, local=False ): """Add a listener that will fire if time matches a pattern.""" job = ha.HassJob(action) # We do not have to wrap the function with time pattern matching logic # if no pattern given if all(val is None for val in (hour, minute, second)): @ha.callback def time_change_listener(ev) -> None: """Fire every time event that comes in.""" hass.async_run_hass_job(job, ev.data[ATTR_NOW]) return hass.bus.async_listen(EVENT_TIME_CHANGED, time_change_listener) matching_seconds = event.dt_util.parse_time_expression(second, 0, 59) matching_minutes = event.dt_util.parse_time_expression(minute, 0, 59) matching_hours = event.dt_util.parse_time_expression(hour, 0, 23) next_time = None def calculate_next(now) -> None: """Calculate and set the next time the trigger should fire.""" nonlocal next_time localized_now = event.dt_util.as_local(now) if local else now next_time = event.dt_util.find_next_time_expression_time( localized_now, matching_seconds, matching_minutes, matching_hours ) # Make sure rolling back the clock doesn't prevent the timer from # triggering. last_now = None @ha.callback def pattern_time_change_listener(ev) -> None: """Listen for matching time_changed events.""" nonlocal next_time, last_now now = ev.data[ATTR_NOW] if last_now is None or now < last_now: # Time rolled back or next time not yet calculated calculate_next(now) last_now = now if next_time <= now: hass.async_run_hass_job( job, event.dt_util.as_local(now) if local else now ) calculate_next(now + datetime.timedelta(seconds=1)) # We can't use async_track_point_in_utc_time here because it would # break in the case that the system time abruptly jumps backwards. # Our custom last_now logic takes care of resolving that scenario. return hass.bus.async_listen(EVENT_TIME_CHANGED, pattern_time_change_listener) with patch( "homeassistant.helpers.event.async_track_point_in_utc_time", async_track_point_in_utc_time, ), patch( "homeassistant.helpers.event.async_track_utc_time_change", async_track_utc_time_change, ): yield @pytest.fixture def enable_custom_integrations(hass): """Enable custom integrations defined in the test dir.""" hass.data.pop(loader.DATA_CUSTOM_COMPONENTS) @pytest.fixture def enable_statistics(): """Fixture to control enabling of recorder's statistics compilation. 
To enable statistics, tests can be marked with: @pytest.mark.parametrize("enable_statistics", [True]) """ return False @pytest.fixture def hass_recorder(enable_statistics, hass_storage): """Home Assistant fixture with in-memory recorder.""" hass = get_test_home_assistant() stats = recorder.Recorder.async_periodic_statistics if enable_statistics else None with patch( "homeassistant.components.recorder.Recorder.async_periodic_statistics", side_effect=stats, autospec=True, ): def setup_recorder(config=None): """Set up with params.""" init_recorder_component(hass, config) hass.start() hass.block_till_done() hass.data[recorder.DATA_INSTANCE].block_till_done() return hass yield setup_recorder hass.stop() @pytest.fixture def mock_integration_frame(): """Mock as if we're calling code from inside an integration.""" correct_frame = Mock( filename="/home/paulus/homeassistant/components/hue/light.py", lineno="23", line="self.light.is_on", ) with patch( "homeassistant.helpers.frame.extract_stack", return_value=[ Mock( filename="/home/paulus/homeassistant/core.py", lineno="23", line="do_something()", ), correct_frame, Mock( filename="/home/paulus/aiohue/lights.py", lineno="2", line="something()", ), ], ): yield correct_frame
31.461245
129
0.682512
59dafabd12fc6cae66c939a51082c0571da4c29a
15499
py
Python
sympy/printing/tests/test_latex.py
Narsil/sympy
4d837e074b871af351b42591697fe126411a910f
[ "BSD-3-Clause" ]
null
null
null
sympy/printing/tests/test_latex.py
Narsil/sympy
4d837e074b871af351b42591697fe126411a910f
[ "BSD-3-Clause" ]
null
null
null
sympy/printing/tests/test_latex.py
Narsil/sympy
4d837e074b871af351b42591697fe126411a910f
[ "BSD-3-Clause" ]
1
2022-03-21T09:07:27.000Z
2022-03-21T09:07:27.000Z
from sympy import (symbols, Rational, Symbol, Integral, log, diff, sin, exp, Function, factorial, floor, ceiling, Abs, re, im, conjugate, gamma, Order, Piecewise, Matrix, asin, Interval, EmptySet, Union, S, Sum, Limit, oo, Poly, Float, lowergamma, uppergamma, hyper, meijerg, Lambda, Poly, RootOf, RootSum, sqrt, Dict) from sympy.abc import mu, tau from sympy.printing.latex import latex from sympy.utilities.pytest import XFAIL, raises from sympy.functions import DiracDelta x, y, z, t = symbols('x y z t') k, n = symbols('k n', integer=True) def test_printmethod(): class R(Abs): def _latex(self, printer): return "foo(%s)" % printer._print(self.args[0]) assert latex(R(x)) == "foo(x)" class R(Abs): def _latex(self, printer): return "foo" assert latex(R(x)) == "foo" def test_latex_basic(): assert latex(1+x) == "x + 1" assert latex(x**2) == "x^{2}" assert latex(x**(1+x)) == "x^{x + 1}" assert latex(x**3+x+1+x**2) == "x^{3} + x^{2} + x + 1" assert latex(2*x*y) == "2 x y" assert latex(2*x*y, mul_symbol='dot') == r"2 \cdot x \cdot y" assert latex(sqrt(x)) == r"\sqrt{x}" assert latex(x**Rational(1,3)) == r"\sqrt[3]{x}" assert latex(sqrt(x)**3) == r"x^{\frac{3}{2}}" assert latex(sqrt(x),itex=True) == r"\sqrt{x}" assert latex(x**Rational(1,3),itex=True) == r"\root{3}{x}" assert latex(sqrt(x)**3,itex=True) == r"x^{\frac{3}{2}}" assert latex(x**Rational(3,4)) == r"x^{\frac{3}{4}}" assert latex(x**Rational(3,4), fold_frac_powers=True) == "x^{3/4}" assert latex(1.5e20*x) == r"1.5 \times 10^{20} x" assert latex(1.5e20*x, mul_symbol='dot') == r"1.5 \cdot 10^{20} \cdot x" def test_latex_Float(): assert latex(Float(1.0e100)) == r"1.0 \times 10^{100}" assert latex(Float(1.0e-100)) == r"1.0 \times 10^{-100}" assert latex(Float(1.0e-100), mul_symbol="dot") == r"1.0 \cdot 10^{-100}" assert latex(1.0*oo) == r"\infty" assert latex(-1.0*oo) == r"- \infty" def test_latex_symbols(): Gamma, lmbda, rho = map(Symbol, ('Gamma', 'lambda', 'rho')) mass, volume = map(Symbol, ('mass', 'volume')) assert latex(Gamma + lmbda) == r"\Gamma + \lambda" assert latex(Gamma * lmbda) == r"\Gamma \lambda" assert latex(Symbol('q21')) == r"q_{21}" assert latex(Symbol('epsilon0')) == r"\epsilon_{0}" assert latex(Symbol('91')) == r"91" assert latex(Symbol('alpha_new')) == r"\alpha_{new}" assert latex(Symbol('C^orig')) == r"C^{orig}" #assert latex(volume * rho == mass) == r"\rho \mathrm{volume} = \mathrm{mass}" #assert latex(volume / mass * rho == 1) == r"\rho \mathrm{volume} {\mathrm{mass}}^{(-1)} = 1" #assert latex(mass**3 * volume**3) == r"{\mathrm{mass}}^{3} \cdot {\mathrm{volume}}^{3}" def test_latex_functions(): assert latex(exp(x)) == "e^{x}" assert latex(exp(1)+exp(2)) == "e + e^{2}" f = Function('f') assert latex(f(x)) == '\\operatorname{f}\\left(x\\right)' beta = Function('beta') assert latex(beta(x)) == r"\operatorname{beta}\left(x\right)" assert latex(sin(x)) == r"\operatorname{sin}\left(x\right)" assert latex(sin(x), fold_func_brackets=True) == r"\operatorname{sin}x" assert latex(sin(2*x**2), fold_func_brackets=True) == \ r"\operatorname{sin}2 x^{2}" assert latex(sin(x**2), fold_func_brackets=True) == \ r"\operatorname{sin}x^{2}" assert latex(asin(x)**2) == r"\operatorname{asin}^{2}\left(x\right)" assert latex(asin(x)**2,inv_trig_style="full") == \ r"\operatorname{arcsin}^{2}\left(x\right)" assert latex(asin(x)**2,inv_trig_style="power") == \ r"\operatorname{sin}^{-1}\left(x\right)^{2}" assert latex(asin(x**2),inv_trig_style="power",fold_func_brackets=True) == \ r"\operatorname{sin}^{-1}x^{2}" assert latex(factorial(k)) == r"k!" 
assert latex(factorial(-k)) == r"\left(- k\right)!" assert latex(floor(x)) == r"\lfloor{x}\rfloor" assert latex(ceiling(x)) == r"\lceil{x}\rceil" assert latex(Abs(x)) == r"\lvert{x}\rvert" assert latex(re(x)) == r"\Re{x}" assert latex(im(x)) == r"\Im{x}" assert latex(conjugate(x)) == r"\overline{x}" assert latex(gamma(x)) == r"\operatorname{\Gamma}\left(x\right)" assert latex(Order(x)) == r"\operatorname{\mathcal{O}}\left(x\right)" assert latex(lowergamma(x, y)) == r'\operatorname{\gamma}\left(x, y\right)' assert latex(uppergamma(x, y)) == r'\operatorname{\Gamma}\left(x, y\right)' def test_hyper_printing(): from sympy import pi, Tuple from sympy.abc import x, z assert latex(meijerg(Tuple(pi, pi, x), Tuple(1), \ (0,1), Tuple(1, 2, 3/pi),z)) == \ r'{G_{4, 5}^{2, 3}\left.\left(\begin{matrix} \pi, \pi, x & 1 \\0, 1 & 1, 2, \frac{3}{\pi} \end{matrix} \right| {z} \right)}' assert latex(meijerg(Tuple(), Tuple(1), (0,), Tuple(),z)) == \ r'{G_{1, 1}^{1, 0}\left.\left(\begin{matrix} & 1 \\0 & \end{matrix} \right| {z} \right)}' assert latex(hyper((x, 2), (3,), z)) == \ r'{{}_{2}F_{1}\left.\left(\begin{matrix} x, 2 ' \ r'\\ 3 \end{matrix}\right| {z} \right)}' assert latex(hyper(Tuple(), Tuple(1), z)) == \ r'{{}_{0}F_{1}\left.\left(\begin{matrix} ' \ r'\\ 1 \end{matrix}\right| {z} \right)}' def test_latex_bessel(): from sympy.functions.special.bessel import (besselj, bessely, besseli, besselk, hankel1, hankel2, jn, yn) from sympy.abc import z assert latex(besselj(n, z**2)**k) == r'J^{k}_{n}\left(z^{2}\right)' assert latex(bessely(n, z)) == r'Y_{n}\left(z\right)' assert latex(besseli(n, z)) == r'I_{n}\left(z\right)' assert latex(besselk(n, z)) == r'K_{n}\left(z\right)' assert latex(hankel1(n, z**2)**2) == \ r'\left(H^{(1)}_{n}\left(z^{2}\right)\right)^{2}' assert latex(hankel2(n, z)) == r'H^{(2)}_{n}\left(z\right)' assert latex(jn(n, z)) == r'j_{n}\left(z\right)' assert latex(yn(n, z)) == r'y_{n}\left(z\right)' def test_latex_brackets(): assert latex((-1)**x) == r"\left(-1\right)^{x}" def test_latex_derivatives(): assert latex(diff(x**3, x, evaluate=False)) == \ r"\frac{\partial}{\partial x} x^{3}" assert latex(diff(sin(x)+x**2, x, evaluate=False)) == \ r"\frac{\partial}{\partial x}\left(x^{2} + \operatorname{sin}\left(x\right)\right)" def test_latex_integrals(): assert latex(Integral(log(x), x)) == r"\int \operatorname{log}\left(x\right)\, dx" assert latex(Integral(x**2, (x,0,1))) == r"\int_{0}^{1} x^{2}\, dx" assert latex(Integral(x**2, (x,10,20))) == r"\int_{10}^{20} x^{2}\, dx" assert latex(Integral(y*x**2, (x,0,1), y)) == r"\int\int_{0}^{1} x^{2} y\, dx\, dy" assert latex(Integral(y*x**2, (x,0,1), y), mode='equation*') \ == r"\begin{equation*}\int\int\limits_{0}^{1} x^{2} y\, dx\, dy\end{equation*}" assert latex(Integral(y*x**2, (x,0,1), y), mode='equation*', itex=True) \ == r"$$\int\int_{0}^{1} x^{2} y\, dx\, dy$$" assert latex(Integral(x, (x, 0))) == r"\int^{0} x\, dx" assert latex(Integral(x*y, x, y)) == r"\iint x y\, dx\, dy" assert latex(Integral(x*y*z, x, y, z)) == r"\iiint x y z\, dx\, dy\, dz" assert latex(Integral(x*y*z*t, x, y, z, t)) == \ r"\iiiint t x y z\, dx\, dy\, dz\, dt" assert latex(Integral(x, x, x, x, x, x, x)) == \ r"\int\int\int\int\int\int x\, dx\, dx\, dx\, dx\, dx\, dx" assert latex(Integral(x, x, y, (z, 0, 1))) == \ r"\int_{0}^{1}\int\int x\, dx\, dy\, dz" def test_latex_intervals(): a = Symbol('a', real=True) assert latex(Interval(0, a)) == r"\left[0, a\right]" assert latex(Interval(0, a, False, False)) == r"\left[0, a\right]" assert latex(Interval(0, a, True, False)) == 
r"\left(0, a\right]" assert latex(Interval(0, a, False, True)) == r"\left[0, a\right)" assert latex(Interval(0, a, True, True)) == r"\left(0, a\right)" def test_latex_emptyset(): assert latex(S.EmptySet) == r"\emptyset" def test_latex_union(): assert latex(Union(Interval(0, 1), Interval(2, 3))) == \ r"\left[0, 1\right] \cup \left[2, 3\right]" assert latex(Union(Interval(1, 1), Interval(2, 2), Interval(3, 4))) == \ r"\left[3, 4\right] \cup \left\{1, 2\right\}" def test_latex_sum(): assert latex(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \ r"\sum_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}" assert latex(Sum(x**2, (x, -2, 2))) == \ r"\sum_{x=-2}^{2} x^{2}" assert latex(Sum(x**2 + y, (x, -2, 2))) == \ r"\sum_{x=-2}^{2} \left(x^{2} + y\right)" def test_latex_limits(): assert latex(Limit(x, x, oo)) == r"\lim_{x \to \infty} x" def test_issue469(): beta = Symbol(r'\beta') y = beta+x assert latex(y) in [r'\beta + x', r'x + \beta'] beta = Symbol(r'beta') y = beta+x assert latex(y) in [r'\beta + x', r'x + \beta'] def test_latex(): assert latex((2*tau)**Rational(7,2)) == "8 \\sqrt{2} \\tau^{\\frac{7}{2}}" assert latex((2*mu)**Rational(7,2), mode='equation*') == \ "\\begin{equation*}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation*}" assert latex((2*mu)**Rational(7,2), mode='equation', itex=True) == \ "$$8 \\sqrt{2} \\mu^{\\frac{7}{2}}$$" assert latex([2/x, y]) =="\\begin{bmatrix}\\frac{2}{x}, & y\\end{bmatrix}" def test_latex_dict(): d = {Rational(1): 1, x**2: 2, x: 3, x**3: 4} assert latex(d) == '\\begin{Bmatrix}1 : 1, & x : 3, & x^{2} : 2, & x^{3} : 4\\end{Bmatrix}' D = Dict(d) assert latex(D) == '\\begin{Bmatrix}1 : 1, & x : 3, & x^{2} : 2, & x^{3} : 4\\end{Bmatrix}' def test_latex_rational(): #tests issue 874 assert latex(-Rational(1,2)) == "- \\frac{1}{2}" assert latex(Rational(-1,2)) == "- \\frac{1}{2}" assert latex(Rational(1,-2)) == "- \\frac{1}{2}" assert latex(-Rational(-1,2)) == "\\frac{1}{2}" assert latex(-Rational(1,2)*x) == "- \\frac{1}{2} x" assert latex(-Rational(1,2)*x+Rational(-2,3)*y) in [ "- \\frac{1}{2} x - \\frac{2}{3} y", "- \\frac{2}{3} y - \\frac{1}{2} x", ] def test_latex_inverse(): #tests issue 1030 assert latex(1/x) == "\\frac{1}{x}" assert latex(1/(x+y)) in ["\\frac{1}{x + y}", "\\frac{1}{y + x}"] def test_latex_DiracDelta(): assert latex(DiracDelta(x)) == "\\delta\\left(x\\right)" assert latex(DiracDelta(x,0)) == "\\delta\\left(x\\right)" assert latex(DiracDelta(x,5)) == "\\delta^{\\left( 5 \\right)}\\left( x \\right)" def test_mode(): expr = x+y assert latex(expr) == 'x + y' assert latex(expr, mode='plain') == 'x + y' assert latex(expr, mode='inline') == '$x + y$' assert latex(expr, mode='equation*')== '\\begin{equation*}x + y\\end{equation*}' assert latex(expr, mode='equation')== '\\begin{equation}x + y\\end{equation}' def test_latex_Piecewise(): p = Piecewise((x,x<1),(x**2,True)) assert latex(p) == "\\begin{cases} x & \\text{for}\: x < 1 \\\\x^{2} &" \ " \\text{otherwise} \\end{cases}" assert latex(p, itex=True) == "\\begin{cases} x & \\text{for}\: x \\lt 1 \\\\x^{2} &" \ " \\text{otherwise} \\end{cases}" def test_latex_Matrix(): M = Matrix([[1+x, y],[y, x-1]]) assert latex(M) == '\\left[\\begin{smallmatrix}x + 1 & y\\\\y & x -'\ '1\\end{smallmatrix}\\right]' settings = {'mat_str' : 'bmatrix'} assert latex(M, **settings) == '\\left[\\begin{bmatrix}x + 1 & y\\\\y &'\ ' x -1\\end{bmatrix}\\right]' settings['mat_delim'] = None assert latex(M, **settings) == '\\begin{bmatrix}x + 1 & y\\\\y & x -1'\ '\\end{bmatrix}' assert latex(M) == '\\left[\\begin{smallmatrix}x + 1 
& y\\\\y & x -1'\ '\\end{smallmatrix}\\right]' def test_latex_mul_symbol(): assert latex(4*4**x, mul_symbol='times') == "4 \\times 4^{x}" assert latex(4*4**x, mul_symbol='dot') == "4 \\cdot 4^{x}" assert latex(4*4**x, mul_symbol='ldot') == "4 \,.\, 4^{x}" assert latex(4*x, mul_symbol='times') == "4 \\times x" assert latex(4*x, mul_symbol='dot') == "4 \\cdot x" assert latex(4*x, mul_symbol='ldot') == "4 \,.\, x" def test_latex_Poly(): assert latex(Poly(x**2 + 2 * x, x)) == r"x^{2} + 2 x" def test_latex_issue1282(): y = 4*4**log(2) assert latex(y) == '4 \\times 4^{\\operatorname{log}\\left(2\\right)}' assert latex(1/y) == '\\frac{1}{4 \\times 4^{\\operatorname{log}\\left(2\\right)}}' def test_latex_issue1477(): assert latex(Symbol("beta_13_2")) == r"\beta_{13 2}" assert latex(Symbol("beta_132_20")) == r"\beta_{132 20}" assert latex(Symbol("beta_13")) == r"\beta_{13}" assert latex(Symbol("x_a_b")) == r"x_{a b}" assert latex(Symbol("x_1_2_3")) == r"x_{1 2 3}" assert latex(Symbol("x_a_b1")) == r"x_{a b1}" assert latex(Symbol("x_a_1")) == r"x_{a 1}" assert latex(Symbol("x_1_a")) == r"x_{1 a}" assert latex(Symbol("x_1^aa")) == r"x^{aa}_{1}" assert latex(Symbol("x_1__aa")) == r"x^{aa}_{1}" assert latex(Symbol("x_11^a")) == r"x^{a}_{11}" assert latex(Symbol("x_11__a")) == r"x^{a}_{11}" assert latex(Symbol("x_a_a_a_a")) == r"x_{a a a a}" assert latex(Symbol("x_a_a^a^a")) == r"x^{a a}_{a a}" assert latex(Symbol("x_a_a__a__a")) == r"x^{a a}_{a a}" assert latex(Symbol("alpha_11")) == r"\alpha_{11}" assert latex(Symbol("alpha_11_11")) == r"\alpha_{11 11}" assert latex(Symbol("alpha_alpha")) == r"\alpha_{\alpha}" assert latex(Symbol("alpha^aleph")) == r"\alpha^{\aleph}" assert latex(Symbol("alpha__aleph")) == r"\alpha^{\aleph}" def test_latex_pow_fraction(): x = Symbol('x') # Testing exp assert 'e^{-x}' in latex(exp(-x)/2).replace(' ', '') # Remove Whitespace # Testing just e^{-x} in case future changes alter behavior of muls or fracs # In particular current output is \frac{1}{2}e^{- x} but perhaps this will # change to \frac{e^{-x}}{2} # Testing general, non-exp, power assert '3^{-x}' in latex(3**-x/2).replace(' ', '') def test_noncommutative(): A, B, C = symbols('A,B,C', commutative=False) assert latex(A*B*C**-1) == "A B C^{-1}" assert latex(C**-1*A*B) == "C^{-1} A B" assert latex(A*C**-1*B) == "A C^{-1} B" def test_latex_order(): expr = x**3 + x**2*y + 3*x*y**3 + y**4 assert latex(expr, order='lex') == "x^{3} + x^{2} y + 3 x y^{3} + y^{4}" assert latex(expr, order='rev-lex') == "y^{4} + 3 x y^{3} + x^{2} y + x^{3}" def test_latex_Lambda(): assert latex(Lambda(x, x + 1)) == \ r"\operatorname{\Lambda}\left(x, x + 1\right)" assert latex(Lambda((x, y), x + 1)) == \ r"\operatorname{\Lambda}\left(\begin{pmatrix}x, & y\end{pmatrix}, x + 1\right)" def test_latex_Poly(): assert latex(Poly(x/y, x)) == \ r"\operatorname{Poly}\left(\frac{x}{y}, x, domain=\mathbb{Z}\left(y\right)\right)" assert latex(Poly(2.0*x + y)) == \ r"\operatorname{Poly}\left(2.0 x + 1.0 y, x, y, domain=\mathbb{R}\right)" def test_latex_RootOf(): assert latex(RootOf(x**5 + x + 3, 0)) == \ r"\operatorname{RootOf}\left(x^{5} + x + 3, 0\right)" def test_latex_RootSum(): assert latex(RootSum(x**5 + x + 3, sin)) == \ r"\operatorname{RootSum}\left(x^{5} + x + 3, \operatorname{\Lambda}\left(x, \operatorname{sin}\left(x\right)\right)\right)" def test_settings(): raises(TypeError, 'latex(x*y, method="garbage")')
44.03125
137
0.557326
099b79ac911d10270fbf05f270daa3de72b0aa31
12,730
py
Python
src/align/align_facescrub.py
tensorflow-pool/insightface
d27ad2d3e8b15a9abaddc86dc12c59437db6ee80
[ "MIT" ]
1
2019-02-19T09:53:11.000Z
2019-02-19T09:53:11.000Z
src/align/align_facescrub.py
tensorflow-pool/insightface
d27ad2d3e8b15a9abaddc86dc12c59437db6ee80
[ "MIT" ]
null
null
null
src/align/align_facescrub.py
tensorflow-pool/insightface
d27ad2d3e8b15a9abaddc86dc12c59437db6ee80
[ "MIT" ]
null
null
null
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import json import os import sys import numpy as np import tensorflow as tf from scipy import misc # import facenet import detect_face sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common')) import face_image from skimage import transform as trans import cv2 def to_rgb(img): w, h = img.shape ret = np.empty((w, h, 3), dtype=np.uint8) ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img return ret def IOU(Reframe, GTframe): x1 = Reframe[0]; y1 = Reframe[1]; width1 = Reframe[2] - Reframe[0]; height1 = Reframe[3] - Reframe[1]; x2 = GTframe[0] y2 = GTframe[1] width2 = GTframe[2] - GTframe[0] height2 = GTframe[3] - GTframe[1] endx = max(x1 + width1, x2 + width2) startx = min(x1, x2) width = width1 + width2 - (endx - startx) endy = max(y1 + height1, y2 + height2) starty = min(y1, y2) height = height1 + height2 - (endy - starty) if width <= 0 or height <= 0: ratio = 0 else: Area = width * height Area1 = width1 * height1 Area2 = width2 * height2 ratio = Area * 1. / (Area1 + Area2 - Area) return ratio def main(args): output_dir = os.path.expanduser(args.output_dir) if not os.path.exists(output_dir): os.makedirs(output_dir) # Store some git revision info in a text file in the log directory # facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv)) image_dir = os.path.join(args.input_dir, 'downloaded') dataset = face_image.get_dataset('facescrub', image_dir) print('dataset size', len(dataset)) bbox = {} for label_file in ['facescrub_actors.txt', 'facescrub_actresses.txt']: label_file = os.path.join(args.input_dir, label_file) pp = 0 for line in open(label_file, 'r'): pp += 1 if pp == 1: continue vec = line.split("\t") key = (vec[0], int(vec[2])) value = [int(x) for x in vec[4].split(',')] bbox[key] = value print('bbox size', len(bbox)) valid_key = {} json_data = open(os.path.join(args.input_dir, 'facescrub_uncropped_features_list.json')).read() json_data = json.loads(json_data)['path'] for _data in json_data: key = _data.split('/')[-1] pos = key.rfind('.') if pos < 0: print(_data) else: key = key[0:pos] keys = key.split('_') # print(key) if len(keys) != 2: print('err', key, _data) continue # assert len(keys)==2 key = (keys[0], int(keys[1])) valid_key[key] = 1 # print(key) print('valid keys', len(valid_key)) print('Creating networks and loading parameters') with tf.Graph().as_default(): # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction) # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)) sess = tf.Session() with sess.as_default(): pnet, rnet, onet = detect_face.create_mtcnn(sess, None) minsize = 100 # minimum size of face threshold = [0.6, 0.7, 0.7] # three steps's threshold factor = 0.709 # scale factor image_size = [112, 96] image_size = [112, 112] src = np.array([ [30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [33.5493, 92.3655], [62.7299, 92.2041]], dtype=np.float32) if image_size[1] == 112: src[:, 0] += 8.0 # Add a random key to the filename to allow alignment using multiple processes # random_key = np.random.randint(0, high=99999) # bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key) # output_filename = os.path.join(output_dir, 'faceinsight_align_%s.lst' % args.name) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) output_filename = os.path.join(args.output_dir, 'lst') with open(output_filename, 
"w") as text_file: nrof_images_total = 0 nrof = np.zeros((5,), dtype=np.int32) for fimage in dataset: if nrof_images_total % 100 == 0: print("Processing %d, (%s)" % (nrof_images_total, nrof)) nrof_images_total += 1 # if nrof_images_total<950000: # continue image_path = fimage.image_path if not os.path.exists(image_path): print('image not found (%s)' % image_path) continue # print(image_path) filename = os.path.splitext(os.path.split(image_path)[1])[0] _paths = fimage.image_path.split('/') print(fimage.image_path) a, b = _paths[-2], _paths[-1] pb = b.rfind('.') bname = b[0:pb] pb = bname.rfind('_') body = bname[(pb + 1):] if not body.isdigit(): print("file passed ", fimage.image_path) continue img_id = int(body) key = (a, img_id) #all align # if not key in valid_key: # continue # print(b, img_id) # assert key in bbox if not bbox.has_key(key): print("bbox has not key ", key) continue fimage.bbox = bbox[key] try: img = misc.imread(image_path) except (IOError, ValueError, IndexError) as e: errorMessage = '{}: {}'.format(image_path, e) print(errorMessage) else: if img.ndim < 2: print('Unable to align "%s", img dim error' % image_path) # text_file.write('%s\n' % (output_filename)) continue if img.ndim == 2: img = to_rgb(img) img = img[:, :, 0:3] if img.shape[0] == 0 or img.shape[1] == 0: continue tb = bname.replace(' ', '_') + ".png" ta = a.replace(' ', '_') target_dir = os.path.join(args.output_dir, ta) if not os.path.exists(target_dir): os.makedirs(target_dir) target_file = os.path.join(target_dir, tb) warped = None if fimage.landmark is not None: dst = fimage.landmark.astype(np.float32) tform = trans.SimilarityTransform() tform.estimate(dst, src[0:3, :] * 1.5 + image_size[0] * 0.25) M = tform.params[0:2, :] warped0 = cv2.warpAffine(img, M, (image_size[1] * 2, image_size[0] * 2), borderValue=0.0) _minsize = image_size[0] try: bounding_boxes, points = detect_face.detect_face(warped0, _minsize, pnet, rnet, onet, threshold, factor) except Exception as e: print(e) continue if bounding_boxes.shape[0] > 0: bindex = 0 det = bounding_boxes[bindex, 0:4] # points need to be transpose, points = points.reshape( (5,2) ).transpose() dst = points[:, bindex].reshape((2, 5)).T tform = trans.SimilarityTransform() tform.estimate(dst, src) M = tform.params[0:2, :] warped = cv2.warpAffine(warped0, M, (image_size[1], image_size[0]), borderValue=0.0) nrof[0] += 1 # assert fimage.bbox is not None if warped is None and fimage.bbox is not None: _minsize = img.shape[0] // 4 try: bounding_boxes, points = detect_face.detect_face(img, _minsize, pnet, rnet, onet, threshold, factor) except Exception as e: print(e) continue if bounding_boxes.shape[0] > 0: det = bounding_boxes[:, 0:4] bindex = -1 index2 = [0.0, 0] for i in range(det.shape[0]): _det = det[i] iou = IOU(fimage.bbox, _det) if iou > index2[0]: index2[0] = iou index2[1] = i if index2[0] > 0.3: bindex = index2[1] if bindex >= 0: dst = points[:, bindex].reshape((2, 5)).T tform = trans.SimilarityTransform() tform.estimate(dst, src) M = tform.params[0:2, :] warped = cv2.warpAffine(img, M, (image_size[1], image_size[0]), borderValue=0.0) nrof[1] += 1 # print('1',target_file,index2[0]) if warped is None and fimage.bbox is not None: bb = fimage.bbox # croped = img[bb[1]:bb[3],bb[0]:bb[2],:] try: bounding_boxes, points = detect_face.detect_face_force(img, bb, pnet, rnet, onet) except Exception as e: print(e) continue assert bounding_boxes.shape[0] == 1 _box = bounding_boxes[0] if _box[4] >= 0.3: dst = points[:, 0].reshape((2, 5)).T tform = trans.SimilarityTransform() 
tform.estimate(dst, src) M = tform.params[0:2, :] warped = cv2.warpAffine(img, M, (image_size[1], image_size[0]), borderValue=0.0) nrof[2] += 1 # print('2',target_file) if warped is None: roi = np.zeros((4,), dtype=np.int32) roi[0] = int(img.shape[1] * 0.06) roi[1] = int(img.shape[0] * 0.06) roi[2] = img.shape[1] - roi[0] roi[3] = img.shape[0] - roi[1] if fimage.bbox is not None: bb = fimage.bbox h = bb[3] - bb[1] w = bb[2] - bb[0] x = bb[0] y = bb[1] # roi = np.copy(bb) _w = int((float(h) / image_size[0]) * image_size[1]) x += (w - _w) // 2 # x = min( max(0,x), img.shape[1] ) x = max(0, x) xw = x + _w xw = min(xw, img.shape[1]) roi = np.array((x, y, xw, y + h), dtype=np.int32) nrof[3] += 1 else: nrof[4] += 1 # print('3',bb,roi,img.shape) # print('3',target_file) warped = img[roi[1]:roi[3], roi[0]:roi[2], :] # print(warped.shape) try: warped = cv2.resize(warped, (image_size[1], image_size[0])) except Exception as e: print(e) continue bgr = warped[..., ::-1] cv2.imwrite(target_file, bgr) oline = '%d\t%s\t%d\n' % (1, target_file, int(fimage.classname)) text_file.write(oline) def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument('--input-dir', type=str, help='Directory with unaligned images.', default="/home/lijc08/datasets/mega_facescrub/") parser.add_argument('--output-dir', type=str, help='Directory with aligned face thumbnails.', default="/home/lijc08/datasets/mega_facescrub/facescrub_aligned_112") # parser.add_argument('--image_size', type=int, # help='Image size (height, width) in pixels.', default=182) # parser.add_argument('--margin', type=int, # help='Margin for the crop around the bounding box (height, width) in pixels.', default=44) return parser.parse_args(argv) if __name__ == '__main__': main(parse_arguments(sys.argv[1:]))
39.78125
167
0.487588
10cd07ee0c13fb12ae430dc834be1c65a7b4a327
3,053
py
Python
client/commands/tests/stop_test.py
skylerberg/pyre-check
e7967e5ee65dd09608f162cdb36a5b0919aeb5e3
[ "MIT" ]
null
null
null
client/commands/tests/stop_test.py
skylerberg/pyre-check
e7967e5ee65dd09608f162cdb36a5b0919aeb5e3
[ "MIT" ]
null
null
null
client/commands/tests/stop_test.py
skylerberg/pyre-check
e7967e5ee65dd09608f162cdb36a5b0919aeb5e3
[ "MIT" ]
null
null
null
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import os
import unittest
from io import StringIO
from unittest.mock import Mock, call, patch

from ... import commands  # noqa
from ...commands import command, stop  # noqa
from ...filesystem import AnalysisDirectory
from .command_test import mock_arguments, mock_configuration


class StopTest(unittest.TestCase):
    @patch.object(os, "kill")
    @patch.object(commands.stop, "open")
    @patch.object(commands.Kill, "_run")
    @patch.object(commands.Command, "_state")
    def test_stop(self, commands_Command_state, kill_run, file_open, os_kill) -> None:
        file_open.side_effect = lambda filename: StringIO("42")
        arguments = mock_arguments()
        arguments.terminal = False

        configuration = mock_configuration()

        analysis_directory = AnalysisDirectory(".")

        def mark_processes_as_completed(process_id, signal):
            if signal == 0:
                raise ProcessLookupError()
            else:
                return

        os_kill.side_effect = mark_processes_as_completed

        # Check start without watchman.
        commands_Command_state.return_value = commands.command.State.RUNNING
        with patch.object(commands.Command, "_call_client") as call_client:
            commands.Stop(arguments, configuration, analysis_directory).run()
            call_client.assert_called_once_with(command=commands.Stop.NAME)
            kill_run.assert_not_called()
            os_kill.assert_has_calls([call(42, 2)])

        commands_Command_state.return_value = commands.command.State.DEAD
        with patch.object(commands.Command, "_call_client") as call_client:
            commands.Stop(arguments, configuration, analysis_directory).run()
            call_client.assert_not_called()
            kill_run.assert_has_calls([call()])
            os_kill.assert_has_calls([call(42, 0), call(42, 2), call(42, 2)])

        commands_Command_state.return_value = commands.command.State.RUNNING
        with patch.object(commands.Command, "_call_client") as call_client:

            def fail_on_stop(command, flags=None):  # noqa
                flags = flags or []
                if command == commands.Stop.NAME:
                    raise commands.ClientException
                return Mock()

            call_client.side_effect = fail_on_stop
            commands.Stop(arguments, configuration, analysis_directory).run()
            call_client.assert_has_calls([call(command=commands.Stop.NAME)])
            kill_run.assert_has_calls([call(), call()])
            os_kill.assert_has_calls(
                [call(42, 0), call(42, 2), call(42, 2), call(42, 2)]
            )

        # Stop ignores irrelevant flags.
        arguments.debug = True
        call_client.side_effect = None
        flags = commands.Stop(arguments, configuration, analysis_directory)._flags()
        self.assertEqual(flags, [])
39.649351
86
0.664592
ed26910e3b23f2e4df521bf1d5f1dbc5ac9b2d8c
773
py
Python
percy/tools.py
sakethramanujam/percy-image-downloader
b835e726b0755c45400c0e219cc03c0ddd77e482
[ "MIT" ]
7
2021-02-23T03:29:02.000Z
2022-01-19T04:20:32.000Z
percy/tools.py
sakethramanujam/percy-image-downloader
b835e726b0755c45400c0e219cc03c0ddd77e482
[ "MIT" ]
6
2021-02-23T05:05:44.000Z
2021-02-25T16:47:58.000Z
percy/tools.py
sakethramanujam/percy-image-downloader
b835e726b0755c45400c0e219cc03c0ddd77e482
[ "MIT" ]
2
2021-02-23T04:59:41.000Z
2021-02-23T15:44:06.000Z
import os
import requests

from .settings import STATS_URL


def n_pages():
    """
    Finds total number of available pages
    """
    try:
        r = get(STATS_URL)
        stats = r.json()
        n = round(stats["total"]/50)
        return n
    except Exception as e:
        print(f"Error: {e}")


def checkpath(*paths: list):
    """
    Checks if a give path exists
    Creates dirs if doesn't exist
    """
    for path in paths:
        if not os.path.exists(path):
            os.makedirs(path)


def get(url: str, **kwargs):
    """
    Wrapper for requests.get
    """
    r = requests.get(url, **kwargs)
    if r.status_code == 200:
        return r
    else:
        raise Exception(
            f"Network Exception, failed to fetch requested page {url}")
19.820513
71
0.56533
9e287a19b4880b6bd5bb429aaafc92c93a25a32e
7,013
py
Python
examples/complex/har_dump.py
dotnes/mitmproxy
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
[ "MIT" ]
1
2017-12-27T09:05:23.000Z
2017-12-27T09:05:23.000Z
examples/complex/har_dump.py
dotnes/mitmproxy
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
[ "MIT" ]
1
2021-05-11T20:51:11.000Z
2021-05-11T20:51:11.000Z
examples/complex/har_dump.py
dotnes/mitmproxy
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
[ "MIT" ]
null
null
null
""" This inline script can be used to dump flows as HAR files. """ import json import base64 import zlib import os import typing # noqa from datetime import datetime from datetime import timezone import mitmproxy from mitmproxy import connections # noqa from mitmproxy import version from mitmproxy import ctx from mitmproxy.utils import strutils from mitmproxy.net.http import cookies HAR = {} # type: typing.Dict # A list of server seen till now is maintained so we can avoid # using 'connect' time for entries that use an existing connection. SERVERS_SEEN = set() # type: typing.Set[connections.ServerConnection] def load(l): l.add_option( "hardump", str, "", "HAR dump path.", ) def configure(updated): HAR.update({ "log": { "version": "1.2", "creator": { "name": "mitmproxy har_dump", "version": "0.1", "comment": "mitmproxy version %s" % version.MITMPROXY }, "entries": [] } }) def response(flow): """ Called when a server response has been received. """ # -1 indicates that these values do not apply to current request ssl_time = -1 connect_time = -1 if flow.server_conn and flow.server_conn not in SERVERS_SEEN: connect_time = (flow.server_conn.timestamp_tcp_setup - flow.server_conn.timestamp_start) if flow.server_conn.timestamp_tls_setup is not None: ssl_time = (flow.server_conn.timestamp_tls_setup - flow.server_conn.timestamp_tcp_setup) SERVERS_SEEN.add(flow.server_conn) # Calculate raw timings from timestamps. DNS timings can not be calculated # for lack of a way to measure it. The same goes for HAR blocked. # mitmproxy will open a server connection as soon as it receives the host # and port from the client connection. So, the time spent waiting is actually # spent waiting between request.timestamp_end and response.timestamp_start # thus it correlates to HAR wait instead. timings_raw = { 'send': flow.request.timestamp_end - flow.request.timestamp_start, 'receive': flow.response.timestamp_end - flow.response.timestamp_start, 'wait': flow.response.timestamp_start - flow.request.timestamp_end, 'connect': connect_time, 'ssl': ssl_time, } # HAR timings are integers in ms, so we re-encode the raw timings to that format. timings = dict([(k, int(1000 * v)) for k, v in timings_raw.items()]) # full_time is the sum of all timings. # Timings set to -1 will be ignored as per spec. 
full_time = sum(v for v in timings.values() if v > -1) started_date_time = datetime.fromtimestamp(flow.request.timestamp_start, timezone.utc).isoformat() # Response body size and encoding response_body_size = len(flow.response.raw_content) response_body_decoded_size = len(flow.response.content) response_body_compression = response_body_decoded_size - response_body_size entry = { "startedDateTime": started_date_time, "time": full_time, "request": { "method": flow.request.method, "url": flow.request.url, "httpVersion": flow.request.http_version, "cookies": format_request_cookies(flow.request.cookies.fields), "headers": name_value(flow.request.headers), "queryString": name_value(flow.request.query or {}), "headersSize": len(str(flow.request.headers)), "bodySize": len(flow.request.content), }, "response": { "status": flow.response.status_code, "statusText": flow.response.reason, "httpVersion": flow.response.http_version, "cookies": format_response_cookies(flow.response.cookies.fields), "headers": name_value(flow.response.headers), "content": { "size": response_body_size, "compression": response_body_compression, "mimeType": flow.response.headers.get('Content-Type', '') }, "redirectURL": flow.response.headers.get('Location', ''), "headersSize": len(str(flow.response.headers)), "bodySize": response_body_size, }, "cache": {}, "timings": timings, } # Store binary data as base64 if strutils.is_mostly_bin(flow.response.content): entry["response"]["content"]["text"] = base64.b64encode(flow.response.content).decode() entry["response"]["content"]["encoding"] = "base64" else: entry["response"]["content"]["text"] = flow.response.get_text(strict=False) if flow.request.method in ["POST", "PUT", "PATCH"]: params = [ {"name": a, "value": b} for a, b in flow.request.urlencoded_form.items(multi=True) ] entry["request"]["postData"] = { "mimeType": flow.request.headers.get("Content-Type", ""), "text": flow.request.get_text(strict=False), "params": params } if flow.server_conn.connected(): entry["serverIPAddress"] = str(flow.server_conn.ip_address[0]) HAR["log"]["entries"].append(entry) def done(): """ Called once on script shutdown, after any other events. """ if ctx.options.hardump: json_dump = json.dumps(HAR, indent=2) # type: str if ctx.options.hardump == '-': mitmproxy.ctx.log(json_dump) else: raw = json_dump.encode() # type: bytes if ctx.options.hardump.endswith('.zhar'): raw = zlib.compress(raw, 9) with open(os.path.expanduser(ctx.options.hardump), "wb") as f: f.write(raw) mitmproxy.ctx.log("HAR dump finished (wrote %s bytes to file)" % len(json_dump)) def format_cookies(cookie_list): rv = [] for name, value, attrs in cookie_list: cookie_har = { "name": name, "value": value, } # HAR only needs some attributes for key in ["path", "domain", "comment"]: if key in attrs: cookie_har[key] = attrs[key] # These keys need to be boolean! for key in ["httpOnly", "secure"]: cookie_har[key] = bool(key in attrs) # Expiration time needs to be formatted expire_ts = cookies.get_expiration_ts(attrs) if expire_ts is not None: cookie_har["expires"] = datetime.fromtimestamp(expire_ts, timezone.utc).isoformat() rv.append(cookie_har) return rv def format_request_cookies(fields): return format_cookies(cookies.group_cookies(fields)) def format_response_cookies(fields): return format_cookies((c[0], c[1][0], c[1][1]) for c in fields) def name_value(obj): """ Convert (key, value) pairs to HAR format. """ return [{"name": k, "value": v} for k, v in obj.items()]
32.771028
102
0.618423
f16b44fe112bb81dbc3ef49faf4acc55eb7925d4
446
py
Python
lugar/migrations/0003_pais_slug.py
CARocha/cafod-joa
e207b29375cd1f2219086b54ec6280e5c5789c32
[ "MIT" ]
1
2021-11-05T11:33:01.000Z
2021-11-05T11:33:01.000Z
lugar/migrations/0003_pais_slug.py
CARocha/cafod-joa
e207b29375cd1f2219086b54ec6280e5c5789c32
[ "MIT" ]
6
2020-06-05T18:13:39.000Z
2022-01-13T00:45:03.000Z
lugar/migrations/0003_pais_slug.py
CARocha/cafod-joa
e207b29375cd1f2219086b54ec6280e5c5789c32
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('lugar', '0002_microcuenca'),
    ]

    operations = [
        migrations.AddField(
            model_name='pais',
            name='slug',
            field=models.SlugField(help_text=b'Usado como url unica(autorellenado)', unique=True, null=True),
        ),
    ]
22.3
109
0.61435
c309c4b9ab17156cdadf27985b0148379a2ce50f
2,870
py
Python
Megatron-LM-v1.1.5-ZeRO3/megatron/model/utils.py
ganik/DeepSpeedExamples
174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5
[ "MIT" ]
309
2020-02-07T23:09:27.000Z
2022-03-31T08:01:53.000Z
Megatron-LM-v1.1.5-ZeRO3/megatron/model/utils.py
ganik/DeepSpeedExamples
174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5
[ "MIT" ]
93
2020-02-22T05:56:28.000Z
2022-03-27T08:43:38.000Z
Megatron-LM-v1.1.5-ZeRO3/megatron/model/utils.py
ganik/DeepSpeedExamples
174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5
[ "MIT" ]
148
2020-02-14T22:16:11.000Z
2022-03-22T17:08:04.000Z
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for models."""

import math

import torch

from .transformer import LayerNorm


def init_method_normal(sigma):
    """Init method based on N(0, sigma)."""
    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
    return init_


def scaled_init_method_normal(sigma, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers)."""
    std = sigma / math.sqrt(2.0 * num_layers)

    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=std)

    return init_


def get_linear_layer(rows, columns, init_method):
    """Simple linear layer with weight initialization."""
    layer = torch.nn.Linear(rows, columns)
    init_method(layer.weight)
    with torch.no_grad():
        layer.bias.zero_()
    return layer


@torch.jit.script
def gelu_impl(x):
    """OpenAI's gelu implementation."""
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
                                       (1.0 + 0.044715 * x * x)))


def openai_gelu(x):
    return gelu_impl(x)


#This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter
@torch.jit.script
def erf_gelu(x):
    return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype))


def get_params_for_weight_decay_optimization(module):
    """Divide params into with-weight-decay and without-weight-decay groups.

    Layernorms and baises will have no weight decay but the rest will.
    """
    weight_decay_params = {'params': []}
    no_weight_decay_params = {'params': [], 'weight_decay': 0.0}
    for module_ in module.modules():
        if isinstance(module_, LayerNorm):
            no_weight_decay_params['params'].extend(
                [p for p in list(module_._parameters.values())
                 if p is not None])
        else:
            weight_decay_params['params'].extend(
                [p for n, p in list(module_._parameters.items())
                 if p is not None and n != 'bias'])
            no_weight_decay_params['params'].extend(
                [p for n, p in list(module_._parameters.items())
                 if p is not None and n == 'bias'])

    return weight_decay_params, no_weight_decay_params
34.166667
105
0.665157
a478552a7eb0ece7c5120af06e2131699d49ba60
1,430
py
Python
tests/test_crawler.py
unit-00/storyteller-data
685648cb558b11c236178bdef0e039069aa23ef3
[ "MIT" ]
null
null
null
tests/test_crawler.py
unit-00/storyteller-data
685648cb558b11c236178bdef0e039069aa23ef3
[ "MIT" ]
null
null
null
tests/test_crawler.py
unit-00/storyteller-data
685648cb558b11c236178bdef0e039069aa23ef3
[ "MIT" ]
null
null
null
"""Tests for Aesop spider""" import unittest # pylint: disable=missing-module-docstring from pymongo import MongoClient import requests from crawler.aesop import get_aesop_links, AesopSpider class TestAesopSpider(unittest.TestCase): """Test cases for Aesop spider""" @classmethod def setUpClass(cls): cls.base_url = 'http://read.gov/aesop/' cls.links = get_aesop_links(cls.base_url)[:2] cls.test_spider = AesopSpider(cls.links) cls.test_db = 'test_db' cls.test_coll = 'test_coll' cls.client = MongoClient('localhost', 27017) cls.coll = cls.client[cls.test_db][cls.test_coll] def test_get_links(self): """Check if get_aesop_links return links""" self.assertTrue(len(self.links) == 2) def test_parse(self): """Check if parse method is parsing properly""" test_story = self.links[0] response = requests.get(test_story) self.assertTrue(isinstance(self.test_spider._parse(response.content), dict)) # pylint: disable=protected-access def test_crawl(self): """Check if crawl is able to insert documents into mongo server""" self.test_spider.crawl(self.coll) self.assertEqual(self.coll.count_documents({}), 2) @classmethod def tearDownClass(cls): cls.client.drop_database(cls.test_db) if __name__ == '__main__': unittest.main()
30.425532
77
0.663636
0a9cd144805026fbd5e80622c1d1dadf19dbf737
8,568
py
Python
pgs_web/settings.py
HDRUK/PGS_Catalog
d59067fc61961770d1e0f8bb6081d10d8bbea3e9
[ "Apache-2.0" ]
5
2020-01-29T18:04:08.000Z
2022-01-04T18:04:05.000Z
pgs_web/settings.py
PGScatalog/PGS_Catalog
d59067fc61961770d1e0f8bb6081d10d8bbea3e9
[ "Apache-2.0" ]
37
2020-02-25T08:50:04.000Z
2022-02-15T10:11:34.000Z
pgs_web/settings.py
HDRUK/PGS_Catalog
d59067fc61961770d1e0f8bb6081d10d8bbea3e9
[ "Apache-2.0" ]
3
2020-01-14T10:19:14.000Z
2020-09-08T20:11:34.000Z
""" Django settings for pgs_web project. Generated by 'django-admin startproject' using Django 3.0.2. For more information on this file, see https://docs.djangoproject.com/en/3.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.0/ref/settings/ """ import os if not os.getenv('GAE_APPLICATION', None): app_settings = os.path.join('./', 'app.yaml') if os.path.exists(app_settings): import yaml with open(app_settings) as secrets_file: secrets = yaml.load(secrets_file, Loader=yaml.FullLoader) for keyword in secrets['env_variables']: os.environ[keyword] = secrets['env_variables'][keyword] elif not os.environ['SECRET_KEY']: print("Error: missing secret key") exit(1) # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ['SECRET_KEY'] # Define auto field for primary keys (since Django 3.2) DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False if os.environ['DEBUG'] == 'True': DEBUG = True ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(',') # Application definition INSTALLED_APPS = [ 'catalog.apps.CatalogConfig', 'rest_api.apps.RestApiConfig', 'search.apps.SearchConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_tables2', 'django_extensions', 'compressor', 'rest_framework', 'corsheaders', 'django_elasticsearch_dsl' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'pgs_web.urls' CONTEXT_PROCESSORS = [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'catalog.context_processors.pgs_urls', 'catalog.context_processors.pgs_settings', 'catalog.context_processors.pgs_search_examples', 'catalog.context_processors.pgs_info' ] if os.getenv('GAE_APPLICATION', None) and DEBUG==False: TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'OPTIONS': { 'context_processors': CONTEXT_PROCESSORS, 'loaders': [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader' ]) ] }, }, ] else: TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': CONTEXT_PROCESSORS }, }, ] # Flags if os.getenv('GAE_APPLICATION', None): PGS_ON_GAE = 1 else: PGS_ON_GAE = 0 PGS_ON_LIVE_SITE = False if 'PGS_LIVE_SITE' in os.environ: PGS_ON_LIVE_SITE = os.environ['PGS_LIVE_SITE'] PGS_ON_CURATION_SITE = False if 'PGS_CURATION_SITE' in os.environ: PGS_ON_CURATION_SITE = os.environ['PGS_CURATION_SITE'] WSGI_APPLICATION = 
'pgs_web.wsgi.application' # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases # [START db_setup] if os.getenv('GAE_APPLICATION', None): # Running on production App Engine, so connect to Google Cloud SQL using # the unix socket at /cloudsql/<your-cloudsql-connection string> DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.environ['DATABASE_NAME'], 'USER': os.environ['DATABASE_USER'], 'PASSWORD': os.environ['DATABASE_PASSWORD'], 'HOST': os.environ['DATABASE_HOST'], 'PORT': os.environ['DATABASE_PORT'] } } else: # Running locally so connect to either a local PostgreSQL instance or connect # to Cloud SQL via the proxy. To start the proxy via command line: # $ cloud_sql_proxy -instances=pgs-catalog:europe-west2:pgs-*******=tcp:5430 # See https://cloud.google.com/sql/docs/postgres/connect-admin-proxy DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.environ['DATABASE_NAME'], 'USER': os.environ['DATABASE_USER'], 'PASSWORD': os.environ['DATABASE_PASSWORD'], 'HOST': 'localhost', 'PORT': os.environ['DATABASE_PORT_LOCAL'] } } # [END db_setup] # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, "static/") STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder' ] if not os.getenv('GAE_APPLICATION', None): STATICFILES_FINDERS.append('compressor.finders.CompressorFinder') COMPRESS_PRECOMPILERS = '' COMPRESS_ROOT = os.path.join(BASE_DIR, "static/") COMPRESS_PRECOMPILERS = ( ('text/x-scss', 'django_libsass.SassCompiler'), ) #---------------------# # REST API Settings # #---------------------# #REST_SAFELIST_IPS = [ # '127.0.0.1' #] REST_BLACKLIST_IPS = [ #'127.0.0.1' ] REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. 
'DEFAULT_PERMISSION_CLASSES': [ 'rest_api.rest_permissions.BlacklistPermission', # see REST_BLACKLIST_IPS #'rest_api.rest_permissions.SafelistPermission', # see REST_SAFELIST_IPS #'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'TEST_REQUEST_DEFAULT_FORMAT': 'json', 'DEFAULT_RENDERER_CLASSES': [ 'rest_framework.renderers.JSONRenderer', 'rest_framework.renderers.BrowsableAPIRenderer', ], 'DEFAULT_PAGINATION_CLASS': 'rest_api.pagination.CustomPagination', 'PAGE_SIZE': 50, 'EXCEPTION_HANDLER': 'rest_api.views.custom_exception_handler', 'DEFAULT_THROTTLE_CLASSES': [ 'rest_framework.throttling.AnonRateThrottle', 'rest_framework.throttling.UserRateThrottle' ], 'DEFAULT_THROTTLE_RATES' : { 'anon': '100/min', 'user': '100/min' } } #-----------------# # CORS Settings # #-----------------# CORS_ALLOWED_ORIGIN_REGEXES = [ r"^https:\/\/\w+\.ebi\.ac\.uk$" ] CORS_URLS_REGEX = r'^/rest/.*$' CORS_ALLOW_METHODS = ['GET'] #--------------------------# # Elasticsearch Settings # #--------------------------# # Elasticsearch configuration ELASTICSEARCH_DSL = { 'default': { 'hosts': os.environ['ELASTICSEARCH_URL_ROOT'] } } # Name of the Elasticsearch index ELASTICSEARCH_INDEX_NAMES = { 'search.documents.score': 'score', 'search.documents.efo_trait': 'efo_trait', 'search.documents.publication': 'publication' }
29.142857
91
0.663749
2a7a942caa3628a1b7465bc638c77f890bbee3d4
1,449
py
Python
spid_cie_oidc/entity/schemas/rp_metadata.py
peppelinux/spid-cie-oidc-authority
816636fece10f410f5d6fce85fd79bb409d0c8b8
[ "Apache-2.0" ]
4
2022-03-08T09:05:13.000Z
2022-03-16T17:59:43.000Z
spid_cie_oidc/entity/schemas/rp_metadata.py
peppelinux/spid-cie-oidc-authority
816636fece10f410f5d6fce85fd79bb409d0c8b8
[ "Apache-2.0" ]
64
2022-03-08T01:11:40.000Z
2022-03-31T17:23:49.000Z
spid_cie_oidc/entity/schemas/rp_metadata.py
peppelinux/spid-cie-oidc-authority
816636fece10f410f5d6fce85fd79bb409d0c8b8
[ "Apache-2.0" ]
8
2022-03-09T12:00:08.000Z
2022-03-31T13:52:14.000Z
from enum import Enum
from typing import List, Optional

from pydantic import BaseModel, HttpUrl, validator

from .jwks import JwksCie, JwksSpid


class GrantTypeSupported(str, Enum):
    refresh_token = "refresh_token"  # nosec - B105
    authorization_code = "authorization_code"


class RPMetadata(BaseModel):
    redirect_uris: List[HttpUrl]
    response_types = ["code"]
    grant_types: List[GrantTypeSupported]
    client_id: HttpUrl
    # TODO: Could be specified in multiple languages
    client_name: str


class RPMetadataSpid(RPMetadata):
    jwks_uri: Optional[HttpUrl]
    jwks: Optional[JwksSpid]

    @validator("jwks", pre=True, always=True)
    def validate_jwks_uri(cls, jwks, values):
        jwks_uri = values.get("jwks_uri")
        if not jwks_uri and not jwks:
            raise ValueError("one of jwks_uri or jwks must be set")
        if jwks_uri and jwks:
            raise ValueError("jwks MUST NOT indicate")


class RPMetadataCie(RPMetadata):
    jwks_uri: Optional[HttpUrl]
    jwks: Optional[JwksCie]
    application_type = "web"
    tls_client_certificate_bound_access_tokens: Optional[bool]

    @validator("jwks", pre=True, always=True)
    def validate_jwks_uri(cls, jwks, values):
        jwks_uri = values.get("jwks_uri")
        if not jwks_uri and not jwks:
            raise ValueError("one of jwks_uri or jwks must be set")
        if jwks_uri and jwks:
            raise ValueError("jwks MUST NOT indicate")
30.829787
67
0.698413
6e16a3380651d2d79759b99cc7ce8b8433700024
40,169
py
Python
JumpScale9Lib/clients/kubernetes/Kubernetes.py
Jumpscale/lib9
82224784ef2a7071faeb48349007211c367bc673
[ "Apache-2.0" ]
2
2017-06-07T08:11:47.000Z
2017-11-10T02:19:48.000Z
JumpScale9Lib/clients/kubernetes/Kubernetes.py
Jumpscale/lib9
82224784ef2a7071faeb48349007211c367bc673
[ "Apache-2.0" ]
188
2017-06-21T06:16:13.000Z
2020-06-17T14:20:24.000Z
JumpScale9Lib/clients/kubernetes/Kubernetes.py
Jumpscale/lib9
82224784ef2a7071faeb48349007211c367bc673
[ "Apache-2.0" ]
3
2018-06-12T05:18:28.000Z
2019-09-24T06:49:17.000Z
from kubernetes import client, config import time import urllib from js9 import j TEMPLATE = """ config_path = "" context = "" sshkey_path = "" incluster_config = false """ JSConfigBase = j.tools.configmanager.base_class_config JSBASE = j.application.jsbase_get_class() class KubernetesMaster(JSConfigBase): """ A class that represents a top view of the hirarchy. Where only the config, context , or namespace are defined. """ def __init__(self, instance, data={}, parent=None, interactive=False): """ Creates a client instance that connects to either a config path or context or both """ JSConfigBase.__init__(self, instance=instance, data=data, parent=parent, template=TEMPLATE, interactive=interactive) # load data from jsconfig c = self.config.data config_path = c['config_path'] context = c['context'] sshkey_path = c['sshkey_path'] incluster_config = c['incluster_config'] if incluster_config: config.load_incluster_config() else: config.load_kube_config(config_file=config_path, context=context) self._v1 = client.CoreV1Api() self._extensionv1b1 = client.ExtensionsV1beta1Api( api_client=self._v1.api_client) if not config_path: config_path = '%s/.kube/config' % j.dirs.HOMEDIR self._config = j.data.serializer.yaml.load(config_path) self.sshkey_path = sshkey_path if not sshkey_path: self.sshkey_path = j.sal.fs.joinPaths( j.dirs.HOMEDIR, '.ssh', j.core.state.configMe["ssh"]["sshkeyname"]) @property def namespaces(self): """ get a list of all available namespaces and their relevant information. """ output = list() namespace_list = self._v1.list_namespace() for namespace in namespace_list.items: namespace_dict = {'name': namespace.metadata.name, 'cluster_name': namespace.metadata.cluster_name, 'status': namespace.status.phase} output.append(namespace_dict) return output def get_namespace(self, name): """ Get namespace object with specified name. @param name,, str name of namespace. """ return self._v1.read_namespace(name) @property def clusters(self): """ get a list of all available clusters and their relevant information. """ return self._config.get('clusters', []) def get_cluster(self, name): """ will get the cluster with the defined name. @param name,, str name of cluster to get """ for cluster in self._config.get('clusters', []): if name == cluster['name']: return cluster ####################### # master.NODE # ####################### def get_node(self, name): """ will get the cluster with the defined name. @param name ,, str node name. """ return self._v1.read_node(name=name) def list_nodes(self, label='', short=True): """ will get all nodes within the defined label. @param label,, dict labels to filter on. example {'beta.kubernetes.io/arch': 'amd64', 'beta.kubernetes.io/os': 'linux', 'kubernetes.io/hostname': 'minikube'} @param short,, bool return small dict if true return full object if false """ nodes = self._v1.list_node(label_selector=label).items if short: output = [] for node in nodes: node_dict = {'name': node.metadata.name, 'image': node.status.node_info.os_image, 'addresses': node.status.addresses} output.append(node_dict) return output return nodes ###################### # master.DEPLOYMENT # ###################### def get_deployment(self, name, namespace='default'): """ will get the deployment with the defined name and namespace. if no namespace defined , will default to the 'default' name space : !! important. @param name ,, str deployment name. @param namespace,, str namespace to filter on. 
""" dep_obj = self._extensionv1b1.read_namespaced_deployment(name, namespace) return Deployment(dep_obj.metadata.name, self, [], deployment_object=dep_obj) def list_deployments(self, namespace=None, short=True): """ will get all deployment within the defined namespace(if no space given will return all). @param namespace,, str namespace to filter on. @param short,, bool return small dict if true return full object if false """ deployments = [] if namespace: deployment_objects = self._extensionv1b1.list_namespaced_deployment( namespace).items else: deployment_objects = self._extensionv1b1.list_deployment_for_all_namespaces().items if short: output = [] for dep in deployment_objects: dep_dict = {'name': dep.metadata.name, 'namespace': dep.metadata.namespace, 'replicas': dep.status.replicas} output.append(dep_dict) return output for dep_obj in deployment_objects: deployments.append(Deployment(dep_obj.metadata.name, self, [], deployment_object=deployment_object)) return deployments def define_deployment(self, name, containers, namespace='default', labels={}, replicas=1, kind='Deployment', cluster_name=None, generate_name=None, volumes=[]): """ define a new deployment returning the Deployment object. @param name,, str name of deployment @param containers,, list(V1Container) can be defined through the self.define_container method @param namespace,, namespace to tag the deployment with @param labels,, dict can be used to specify selectors for filtering and group actions @param replicas,,int number of replicas that will be maintained running throughout the life of the deployment @param kind,, str kind of object to be defined , will default to Deployment @param cluster_name,, str cluster or context to create deployment on @param generate_name,,str first part of the generated name """ api_version = 'extensions/v1beta1' return Deployment(name, master=self, namespace=namespace, containers=containers, labels=labels, replicas=replicas, api_version=api_version, cluster_name=cluster_name, generate_name=generate_name, volumes=volumes) def deploy_ubuntu1604(self, name, namespace='default', labels={}, replicas=1, generate_name=None, sshkey_path=None, external_ssh_port=32202, volumes=[], volume_mounts=[]): """ Creates and deploys a ubuntu1604 phusion image deployment that has a ssh configured. @param name,, str name of deployment @param namespace,, namespace to tag the deployment with @param labels,, dict can be used to specify selectors for filtering and group actions @param replicas,,int number of replicas that will be maintained running throughout the life of the deployment. @param cluster_name,, str cluster or context to create deployment on. NOT SUPPORTED by the api server @param generate_name,,str first part of the generated name @param sshkey_path,,str path to new ssh key if none will default to preloaded key @param external_ssh_port,,int external port to map the ssh 22 port to. 
""" # define container container = self.define_container(name='ubuntu1604', image='jumpscale/ubuntu1604', command=['/sbin/my_init'], ports=[22], enable_ssh=True, sshkey_path=sshkey_path, volume_mounts=volume_mounts) app_label = {'app': name} labels.update(app_label) deployment = self.define_deployment(name=name, labels=labels, namespace=namespace, containers=[container], replicas=replicas, generate_name=generate_name, volumes=volumes) deployment.create() ssh_service = self.define_ssh_service( name, app_label['app'], external_ssh_port) ssh_service.create() clusters = self._config.get('clusters') if not clusters: raise RuntimeError('no Clusters defined this your configuration is incorrect') api_server_endpoint = urllib.parse.urlsplit(clusters[0]['cluster']['server']) node_ip = api_server_endpoint.hostname return j.tools.prefab.get('%s:%s' % (node_ip, external_ssh_port)) ###################### # master.VOLUMES # ###################### def define_host_path_volume(self, name, path, data_type=''): """ Represents a host path on the node mapped into a pod. This does not change permissions and kubernetes is not responsible for creating the path location or managing it , it only uses the volume for mounting and checking on the type of the path Host path volumes do not support ownership management or SELinux relabeling. @param name ,, str name of volume that is created. @param path ,, str path of the directory on the host. If the path is a symlink, it will follow the link to the real path. @param type ,, str Type for HostPath Volume Defaults to "". """ host_path_vol = client.V1HostPathVolumeSource(path=path, type=data_type) return client.V1Volume(name=name, host_path=host_path_vol) def define_config_map_volume(self, name, config_name, config_items, default_mode=0o644, optional=False): """ The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling. @param name ,, str name of volume that is created. @param config_name ,, str name of the config map being used @param config_items ,,dict key value to project and The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. @param default_mode ,, int mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. @param optional ,,bool Specify whether the ConfigMap or it's keys must be defined """ config_map_vol = client.V1ConfigMapVolumeSource(default_mode=default_mode, optional=optional, name=config_name, items=config_items) return client.V1Volume(name=name, config_map=config_map) def define_empty_dir_volume(self, name, medium="", size_limit=None): """ Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling by default, emptyDir volumes are stored on whatever medium is backing the node - that might be disk or SSD or network storage, depending on your environment. However, you can set the emptyDir.medium field to "Memory" to tell Kubernetes to mount a tmpfs (RAM-backed filesystem) for you instead. 
While tmpfs is very fast, be aware that unlike disks, tmpfs is cleared on node reboot and any files you write will count against your container’s memory limit. @param name ,, str name of volume that is created. @param medium ,,str What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. @param size_limit ,,int Total amount of local storage required for this EmptyDir volume. """ empty_dir_vol = client.V1EmptyDirVolumeSource(medium=medium, size_limit=sizeLimit) return client.V1Volume(name=name, empty_dir=empty_dir_vol) def define_git_volume(self, name, directory, repo, revision=None): """ Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabelin. @param name ,, str name of volume that is created. @param directory ,, str Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. @param repo ,, str Repository URL @param revision ,, str Commit hash for the specified revision. """ git_vol = client.V1GitRepoVolumeSource(directory=directory, repository=repo, revision=revision) return client.V1Volume(name=name, git_repo=git_vol) def define_persistent_volume_claim(self): """ define persistent volume claim TODO """ ###################### # master.POD # ###################### def get_pod(self, name, namespace='default'): """ will get the pod with the defined name and namespace. if no namespace defined , will default to the 'default' name space : !! important. @param name ,, str deployment name. @param namespace,, str namespace to filter on. """ pod_obj = self._v1.read_namespaced_pod(name, namespace) return Pod(pod_obj.metadata.name, self, [], pod_object=pod_object) def list_pods(self, namespace=None, short=True): """ will get all pod within the defined namespace(if no space given will return all). @param namespace,, str namespace to filter on. 
@param short,, bool return small dict if true return full object if false """ if namespace: pod_objects = self._v1.list_namespaced_pod(namespace).items else: pod_objects = self._v1.list_pod_for_all_namespaces().items if short: output = [] for pod in pod_objects: pod_dict = {'name': pod.metadata.name, 'namespace': pod.metadata.namespace, 'status': pod.status.phase, 'ip': pod.status.pod_ip} output.append(pod_dict) return output return [Pod(pod_object.metadata.name, self, [], pod_object=pod_object)for pod_object in pod_objects] def define_pod(self): """ define a Pod object instance TODO """ # return Pod(name, master, containers, labels, replicas, api_version, kind, ssh_key) def define_affinity(self): """ define affinity object to be passed to a pod, deployment or statefulset TODO """ ###################### # master.Container # ###################### def define_container(self, name, image, ports=[], command=None, args=None, sshkey_path=None, enable_ssh=False, envs=[], volume_mounts=[]): """ define container object to be passed to pod creation or deployment @param name,,str name of the container @param image,,str name (full path) of image @param ports,,list(int) list of ports to expose to the node @param command,, list(str) entry point to the docker @param args ,, list(str) args to be passed to the entry point @param sshkey_path,, str full path to ssh_key will work only if enable_ssh is true @param enable_ssh,, bool if True and no key is passed will load the default loaded one in jumpscale me configs @param envs,, list({'key':'value'}) environment variable to define in the container @param volume_mounts,, list(volume_mounts) volumes to mount to the containers created from define mount """ envs = [client.V1EnvVar(env.key, env.value) for env in envs] # get ssh_pub_key if not provided will default to the preloaded if enable_ssh: if sshkey_path: pub_key = j.sal.fs.readFile(sshkey_path) else: pub_key = j.sal.fs.readFile(self.sshkey_path) # the key must be added in the command to be executed on restarts and recovery. joined_command = '' joined_args = '' new_command_args = '' if command: joined_command = "".join(command) if args: joined_args = "".join(args) new_command_args = "&& %s %s" % (joined_command, joined_args) envs.append(client.V1EnvVar('PUBSSHKEY', pub_key)) command = ['/bin/bash'] args = [ "-c", "service ssh start && mkdir -p /root/.ssh/ && echo ${PUBSSHKEY} > /root/.ssh/authorized_keys %s" % new_command_args] # Configureate Pod template container container_ports = [client.V1ContainerPort( container_port=port) for port in ports] container = client.V1Container( name=name, image=image, env=envs, ports=container_ports, command=command, args=args, stdin=True, volume_mounts=volume_mounts) return container # The client can be extended for container , but at the moment does not seem necessary. def define_mount(self, name, mount_path, read_only=False, sub_path=""): """ define the mount on the container to attach a volume. @param name,, This must match the Name of a Volume. @param mountPath,, Path within the container at which the volume should be mounted. Must not contain ':'. @param readOnly,, Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. @param subPath,,Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). 
""" return client.V1VolumeMount(name=name, mount_path=mount_path, read_only=read_only, sub_path=sub_path) ###################### # master.Service # ###################### def get_service(self, name, namespace='default'): """ will get the service with the defined name and namespace. if no namespace defined , will default to the 'default' name space : !! important. @param name ,, str deployment name. @param namespace,, str namespace to filter on. """ service_object = self._v1.read_namespaced_service(name, namespace) return Service(service_object.metadata.name, self, service_object=service_object) def list_services(self, namespace=None, short=True): """ will get all services within the defined namespace(if no space given will return all). @param namespace,, str namespace to filter on. @param short,, bool return small dict if true return full object if false """ services = [] if namespace: service_objs = self._v1.list_namespaced_service(namespace).items else: service_objs = self._v1.list_service_for_all_namespaces().items if short: output = [] for service in service_objs: service_dict = {'name': service.metadata.name, 'namespace': service.metadata.namespace, 'ports': service.spec.ports} output.append(service_dict) return output for service_obj in service_objs: services.append(Service(service_obj.metadata.name, self, service_object=service_obj)) return services def define_service(self, name, selector, ports, protocol=None, service_type='LoadBalancer'): """ define service object returning the Service object. @param name ,, name of the service that will be created , prefered to be the same as the selector app value. @param ports ,, list(str) list of comma seprated string [internalport:externalport], e.g. ['22:2202'] @param selector ,,list({string:string}) points towards the app the service will expose usually {'app':appname} @param protocol ,, str tcp or udp , default to tcp @param service_type,, determine the typed of networking service to create , can be ClusterIP , NodePort, or LoadBalancer """ return Service(name=name, master=self, selector=selector, ports=ports, protocol=protocol, service_type=service_type) def define_ssh_service(self, name, app, external_port): """ creates and deploys the Service that does the portforwarding for ssh from the pod to outside the node. ssh service, keys, and port need to be configured withing the pod or deployment. @param name,, str name of the service @param app,, str app label that relates to the deployment or deployments to allow ssh on @param external_port,, int external port to map the ssh 22 port to. """ port = ['22:%d' % external_port] service = self.define_service(name, {'app': app}, port, service_type='NodePort') return service ###################### # DEPLOYMENT # ###################### class Deployment(JSBASE): """ Kubernetes cluster wrapper layer. """ def __init__(self, name, master, containers, labels={}, replicas=1, api_version='extensions/v1beta1', cluster_name=None, namespace='default', generate_name=None, min_ready_seconds=0, progress_deadline_seconds=600, deployment_strategy_type='RollingUpdate', dns_policy='ClusterFirstWithHostNet', deployment_strategy_rolling_update=None, selectors=None, deployment_object=None, volumes=[]): """ Create a new deployment object in the specified cluster with specified label. @param name,,str name of the deployment. @param master,, KubernetesMaster the master object that has all the clients and the config to connect to. 
@param containers,, list(V1Container) this can be produces using the self.define_container method on mater. @param labels,,list({string:string}) labels to apply to the pod @param replicas,, number of replicas to maintain until the end of lifetime of deployment. @param cluster_name,,str cluster to create the pod on @param namespace,, str namespace to relate the pod to @param dns_policy,,str set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to "ClusterFirst". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. @param selector,,{string:string} is a selector which must be true for the deployment to fit on a node or cluster. @param generate_name,, str the generated name for deployment. @param progress_deadline_seconds,, int The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s. @param deployment_strategy_type,, str Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. @param deployment_strategy_rolling_update,, {maxSurge:int, maxUnavailable: int} @param min_ready_seconds,, int Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) @param volumes,, list(V1Volume) can be created from the define_?_volume methods """ JSBASE.__init__(self) self.object = deployment_object if not deployment_object: kind = 'Deployment' labels.update({'app': name}) # Create and configure pod spec section pod_spec = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(labels=labels), spec=client.V1PodSpec(containers=containers, dns_policy=dns_policy, volumes=volumes)) # create deployment_strategy deployment_strategy = client.AppsV1beta1DeploymentStrategy(rolling_update=deployment_strategy_rolling_update, type=deployment_strategy_type) # Create the specification of deployment selector = None if selectors: selector = client.V1LabelSelector([], selectors) deployment_spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas, template=pod_spec, progress_deadline_seconds=progress_deadline_seconds, min_ready_seconds=min_ready_seconds, strategy=deployment_strategy, selector=selector) # Instantiate the deployment object self.object = client.ExtensionsV1beta1Deployment(api_version=api_version, kind=kind, spec=deployment_spec, metadata=client.V1ObjectMeta(name=name, cluster_name=cluster_name, namespace=namespace, generate_name=generate_name)) self.master = master def __str__(self): return self.object.to_str() def __dict__(self): return self.object.to_dict() def __repr__(self): return self.object.__repr__() def create(self): """ Create a new deployment. @param deployment ExtensionsV1beta1Deployment (an object that is created through self.define_deployment) """ # Create deployment api_response = self.master._extensionv1b1.create_namespaced_deployment( body=self.object, namespace=self.object.metadata.namespace) self.logger.info("Deployment created. status='%s'" % str(api_response.status)) def update(self): """ Update the deployment by applying the changes the happened in the deployment object. 
@param deployment,, ExtensionsV1beta1Deployment (an object that is created through self.define_deployment) """ # Update the deployment api_response = self.master._extensionv1b1.patch_namespaced_deployment(name=self.object.metadata.name, namespace=self.object.metadata.namespace, body=self.object) self.logger.info("Deployment updated. status='%s'" % str(api_response.status)) def delete(self, grace_period_seconds=0, propagation_policy='Foreground'): """ delete the named deployment. @param name,, str :name of the deployment. @param grace_period_seconds,, int :The duration in seconds before the object should be deleted. @param propagation_policy,, str :Whether and how garbage collection will be performed. """ # delete options delete_options = client.V1DeleteOptions(propagation_policy=propagation_policy, grace_period_seconds=grace_period_seconds) # Delete deployment api_response = self.master._extensionv1b1.delete_namespaced_deployment(name=self.object.metadata.name, namespace=self.object.metadata.namespace, body=delete_options) self.logger.info("Deployment deleted. status='%s'" % str(api_response.status)) ###################### # POD # ###################### class Pod(client.V1Pod, JSBASE): """ Kubernetes Pod wrapper layer. """ def __init__(self, name, master, containers, namespace='default', cluster_name=None, host_aliases={}, dns_policy='ClusterFirstWithHostNet', labels={}, host_network=True, hostname=None, init_containers=None, node_name=None, node_selector=None, subdomain=None, volumes=None, pod_object=None): """ Create a new pod object in the specified cluster with specified label. @param name,,str name of the deployment @param master,, KubernetesMaster the master object that has all the clients and the config to connect to. @param containers,, list(V1Container) this can be produces using the self.define_container method on mater. @param namespace,, str namespace to relate the pod to @param cluster_name,,str cluster to create the pod on @param host_aliases,,list({hostname: ip}) is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. @param dns_policy,,str set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to "ClusterFirst". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. @param labels,,list({string:string}) labels to apply to the pod @param host_network,,bool Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. @param hostname,,str Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. @param init_containers,, list(V1Container) List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. 
Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/ini @param node_name,,str NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. @param node_selector,,list({string:string}) NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ @param subdomain,,str If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. """ JSBASE.__init__(self) self.object = pod_object if not pod_object: # create metadata for the pod metadata = client.V1ObjectMeta( name=name, cluster_name=cluster_name, namespace=namespace, labels=labels) # get volumes volume_objects = [] for volume in volumes: volume_objects.append(master._v1.read_persistent_volume(volume)) # Create and configure pod spec section pod_spec = client.V1PodSpec(host_aliases=host_aliases, dns_policy=dns_policy, containers=containers, host_network=host_network, hostname=hostname, init_containers=init_containers, node_name=node_name, node_selector=node_selector, restart_policy='Always', subdomain=subdomain, volumes=volumes) self.object = client.V1Pod('v1', 'Pod', metadata=metadata, spec=pod_spec) self.master = master def __str__(self): return self.object.to_str() def __dict__(self): return self.object.to_dict() def __repr__(self): return self.object.__repr__() def create(self): """ Create a new pod. """ # Create deployment api_response = self.master._extensionv1b1.create_namespaced_pod( body=self.object, namespace=self.object.metadata.namespace) self.logger.info("Pod created. status='%s'" % str(api_response.status)) def update(self): """ Update the pod by applying the changes the happened in the pod object. """ # Update the pod api_response = self.master._extensionv1b1.patch_namespaced_pod(name=self.object.metadata.name, namespace=self.object.metadata.namespace, body=self.object) self.logger.info("Pod updated. status='%s'" % str(api_response.status)) def delete(self, grace_period_seconds=0, propagation_policy='Foreground'): """ delete the pod the object relates to. @param name,, str :name of the pod. @param grace_period_seconds,, int :The duration in seconds before the object should be deleted. @param propagation_policy,, str :Whether and how garbage collection will be performed. """ # delete options delete_options = client.V1DeleteOptions(propagation_policy=propagation_policy, grace_period_seconds=grace_period_seconds) # Delete pod api_response = self.master._extensionv1b1.delete_namespaced_pod(name=self.object.metadata.name, namespace=self.object.metadata.namespace, body=delete_options) self.logger.info("Pod deleted. status='%s'" % str(api_response.status)) ###################### # Service # ###################### class Service(client.V1Service, JSBASE): def __init__(self, name, master, selector=None, ports=None, namespace='default', protocol='tcp', service_type='LoadBalancer', service_object=None): """ Create a new Service object in the specified cluster with specified selector and other options. 
@param name,, name of the service object @param master,, KubernetesMaster the master object that has all the clients and the config to connect to. @param selector,,list({string:string}) points towards the app the service will expose usually {'app':appname} @param namespace,, str namespace to relate the service to @param ports ,, list(str) list of comma seprated string [internalport:externalport], e.g. ['22:32202'] @param protocol,,str tcp or udp , default to tcp @param service_type,,str type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. "ExternalName" maps to the specified externalName. "ClusterIP" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is "None", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. "NodePort" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. "LoadBalancer" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types """ JSBASE.__init__(self) self.object = service_object if not service_object: # create etadata for the service metadata = client.V1ObjectMeta(name=name, namespace=namespace) service_ports = [] for port_pair in ports: if service_type == 'LoadBalancer' or service_type == 'ClusterIP': internal_port, _ = port_pair.split(':') service_ports.append(client.V1ServicePort('%s-%s' % (name, internal_port), port=int(internal_port), protocol=protocol)) else: internal_port, external_port = port_pair.split(':') service_ports.append(client.V1ServicePort('%s-%s-%s' % (name, internal_port, external_port), port=int(internal_port), node_port=int(external_port), protocol=protocol)) # create the specs service_spec = client.V1ServiceSpec( selector=selector, ports=service_ports, type=service_type) # define the service self.object = client.V1Service(api_version='v1', kind='Service', metadata=metadata, spec=service_spec) self.master = master def __str__(self): return self.object.to_str() def __dict__(self): return self.object.to_dict() def __repr__(self): return self.object.__repr__() def create(self): """ Create a new service. """ # Create deployment api_response = self.master._v1.create_namespaced_service( body=self.object, namespace=self.object.metadata.namespace) self.logger.info("service created. status='%s'" % str(api_response.status)) def update(self): """ Update the service by applying the changes the happened in the service object. """ # Update the service api_response = self.master._v1.patch_namespaced_service(name=self.object.metadata.name, namespace=self.object.metadata.namespace, body=self.object) self.logger.info("service updated. status='%s'" % str(api_response.status)) def delete(self): """ delete the named service. @param name,, str :name of the service. """ # Delete service api_response = self.master._v1.delete_namespaced_service(name=self.object.metadata.name, namespace=self.object.metadata.namespace) self.logger.info("service deleted. status='%s'" % str(api_response.status))
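A minimal usage sketch for the wrapper classes above, assuming `master` is an already-initialised client object exposing the `define_container` and `define_service` helpers shown earlier in this file and that a working kubeconfig is loaded; the image, names and ports are illustrative only.

# Illustrative sketch only: `master` stands in for an initialised client that
# owns the define_* helpers above; names, image and ports are made up.
container = master.define_container(
    name="web",
    image="nginx:stable",
    ports=[80],
)

# Run two replicas of the container; the Deployment adds the label app=web itself.
deployment = Deployment(name="web", master=master, containers=[container], replicas=2)
deployment.create()

# Expose container port 80 on node port 30080 via a NodePort service selecting app=web.
service = master.define_service("web-svc", {"app": "web"}, ["80:30080"], service_type="NodePort")
service.create()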
54.282432
901
0.633698
e12e8b92ae78c8c453d8c7d376da17054d7ddbda
2,839
py
Python
simple_db_builder/builder.py
Nunie123/simple_db_builder
4dea6d7e18e076405d633d8070a8c918f77dfe88
[ "MIT" ]
null
null
null
simple_db_builder/builder.py
Nunie123/simple_db_builder
4dea6d7e18e076405d633d8070a8c918f77dfe88
[ "MIT" ]
null
null
null
simple_db_builder/builder.py
Nunie123/simple_db_builder
4dea6d7e18e076405d633d8070a8c918f77dfe88
[ "MIT" ]
null
null
null
import argparse, json, logging

from connections import Connection
from helpers import time_and_log


logging.basicConfig(
    filename="log.log",
    level=logging.DEBUG,
    format="%(asctime)s:%(levelname)s:%(message)s"
)


@time_and_log(logger=logging)
def execute_and_log_raw_sql(connection, raw_sql_to_execute):
    connection.execute_sql_from_string(raw_sql_to_execute)
    return None


@time_and_log(logger=logging)
def execute_and_log_sql_file(connection, file_location):
    connection.execute_sql_from_file(file_location)
    return None


@time_and_log(logger=logging)
def execute_and_log_stored_procedure(connection, stored_procedure_name):
    connection.execute_stored_procedure(stored_procedure_name)
    return None


def execute_raw_sql_from_settings_dict(connection, build_settings):
    raw_sql_to_execute = build_settings.get('raw_sql_to_execute')
    if raw_sql_to_execute:
        execute_and_log_raw_sql(connection, raw_sql_to_execute)
    return None


def execute_sql_file_list_from_settings_dict(connection, build_settings):
    sql_file_list = build_settings.get('raw_sql_file_locations', [])
    for sql_file in sql_file_list:
        execute_and_log_sql_file(connection, sql_file)
    return None


def execute_stored_procedure_list_from_settings_dict(connection, build_settings):
    stored_procedure_list = build_settings.get('stored_procedure_names', [])
    for sp_name in stored_procedure_list:
        execute_and_log_stored_procedure(connection, sp_name)
    return None


@time_and_log(logger=logging)
def build_db(build_name, credentials_file_location='config.ini', build_settings_file_location='settings.json'):
    with open(build_settings_file_location) as f:
        build_settings_list = json.load(f)
    # Validate the lookup before indexing so a missing build_name raises a clear
    # error instead of an IndexError.
    matching_builds = [d for d in build_settings_list if d['build_name'] == build_name]
    if not matching_builds:
        logging.error(f'Provided build_name not found in {build_settings_file_location}')
        raise Exception(f'Provided build_name not found in {build_settings_file_location}')
    build_settings = matching_builds[0]
    connection_name = build_settings['connection_name']
    connection = Connection(connection_name, credentials_file_location)

    execute_raw_sql_from_settings_dict(connection, build_settings)
    execute_sql_file_list_from_settings_dict(connection, build_settings)
    execute_stored_procedure_list_from_settings_dict(connection, build_settings)

    return f'Database built for connection {connection_name}.'


def get_parser():
    parser = argparse.ArgumentParser(description="Simple-DB-Builder")
    parser.add_argument("-n", "--build_name", type=str,
                        help="name of the build to run, as defined in the build settings file")
    return parser


if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    build_db(args.build_name)
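A hedged example of driving `build_db` above; the settings layout is inferred from the keys the function reads, and the connection name, SQL file path and procedure name are placeholders (a matching entry in config.ini and a reachable database are assumed).

import json

# Hypothetical build settings matching the keys build_db() looks up.
settings = [
    {
        "build_name": "nightly_build",              # passed via --build_name / -n
        "connection_name": "warehouse",             # must exist in config.ini
        "raw_sql_to_execute": "CREATE SCHEMA IF NOT EXISTS staging;",
        "raw_sql_file_locations": ["sql/create_tables.sql"],
        "stored_procedure_names": ["refresh_reporting_views"],
    }
]

with open("settings.json", "w") as f:
    json.dump(settings, f, indent=2)

# Equivalent to running: python builder.py -n nightly_build
# (requires a valid connection, so the call is left commented here)
# build_db("nightly_build")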
34.621951
111
0.777739
4dfaf234f8097a41d863c34721c99d8e7d182843
627
py
Python
business/enums.py
kthaisse/website
be0d0e0763ae2a6b8351c08b432229eae9521f1d
[ "MIT" ]
null
null
null
business/enums.py
kthaisse/website
be0d0e0763ae2a6b8351c08b432229eae9521f1d
[ "MIT" ]
null
null
null
business/enums.py
kthaisse/website
be0d0e0763ae2a6b8351c08b432229eae9521f1d
[ "MIT" ]
null
null
null
import enum class ContactType(enum.IntEnum): REPRESENTATIVE = 0 RECRUITER = 1 TECHNICAL = 2 class OfferType(enum.IntEnum): INTERNSHIP = 0 SUMMER_INTERNSHIP = 1 PART_TIME = 2 FULL_TIME = 3 VOLUNTEER = 4 OTHER = 5 MASTER_THESIS = 6 OfferType.labels = { OfferType.INTERNSHIP: "Internship", OfferType.SUMMER_INTERNSHIP: "Summer internship", OfferType.PART_TIME: "Part-time job", OfferType.FULL_TIME: "Full-time job", OfferType.VOLUNTEER: "Volunteering", OfferType.MASTER_THESIS: "Master thesis", OfferType.OTHER: "Other", } OfferTypeDict = OfferType.labels
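A small illustrative sketch (not taken from the project) of consuming the label mapping above, for example to build human-readable choice pairs.

# Build (value, label) pairs, e.g. for a form/select widget.
offer_choices = [(offer.value, OfferType.labels[offer]) for offer in OfferType]
assert (OfferType.FULL_TIME.value, "Full-time job") in offer_choices

# Look up a label from a stored integer value.
stored = 6
print(OfferType.labels[OfferType(stored)])  # -> "Master thesis"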
20.9
53
0.6874
0a3bb24290d481a69e83239a9e3265a782a9b714
1,395
py
Python
src/resume/views.py
zhangjie2012/qwerty-server
56cfae83381df007cffce4c1cbf8f786f44e63b8
[ "MIT" ]
6
2018-11-07T03:46:10.000Z
2019-06-21T13:51:19.000Z
src/resume/views.py
zhangjie2012/qwerty
56cfae83381df007cffce4c1cbf8f786f44e63b8
[ "MIT" ]
1
2018-11-26T15:23:24.000Z
2018-11-26T15:23:24.000Z
src/resume/views.py
zhangjie2012/qwerty
56cfae83381df007cffce4c1cbf8f786f44e63b8
[ "MIT" ]
1
2020-10-01T11:00:33.000Z
2020-10-01T11:00:33.000Z
from utils.md_render import md_render from utils.logger import logger from utils.http_tools import SuccessResponse from utils.restful import RESTful from .models import Job, Education class ResumeHandler(RESTful): def get(self, request): job_qs = Job.objects.all() job_list = [] for job in job_qs: job_list.append({ 'company_name': job.company_name, 'company_site': job.company_site, 'start_dt': job.start_dt, 'end_dt': job.end_dt, 'title': job.title, 'product': md_render(job.product), 'duties': job.duties, 'tech_stack': md_render(job.tech_stack), }) education_qs = Education.objects.all() education_list = [] for education in education_qs: education_list.append({ 'school_name': education.school_name, 'school_site': education.school_site, 'start_dt': education.start_dt, 'end_dt': education.end_dt, 'degree': education.degree, 'major': education.major, 'course': education.course, }) logger.debug('query resume') return SuccessResponse({ 'job_list': job_list, 'education_list': education_list, })
31.704545
56
0.555556
f706ac41f003b9f5949025c1d816184bd84d4f9a
950
py
Python
programs/pgm16_10.py
danielsunzhongyuan/python_practice
79bc88db1c52ee2f5607f6f9fec1bbacea2804ff
[ "Apache-2.0" ]
null
null
null
programs/pgm16_10.py
danielsunzhongyuan/python_practice
79bc88db1c52ee2f5607f6f9fec1bbacea2804ff
[ "Apache-2.0" ]
null
null
null
programs/pgm16_10.py
danielsunzhongyuan/python_practice
79bc88db1c52ee2f5607f6f9fec1bbacea2804ff
[ "Apache-2.0" ]
null
null
null
# # This file contains the Python code from Program 16.10 of # "Data Structures and Algorithms # with Object-Oriented Design Patterns in Python" # by Bruno R. Preiss. # # Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved. # # http://www.brpreiss.com/books/opus7/programs/pgm16_10.txt # class Graph(Container): def breadthFirstTraversal(self, visitor, start): assert isinstance(visitor, Visitor) enqueued = Array(self._numberOfVertices) for v in xrange(self._numberOfVertices): enqueued[v] = False queue = QueueAsLinkedList() queue.enqueue(self[start]) enqueued[start] = True while not queue.isEmpty and not visitor.isDone: v = queue.dequeue() visitor.visit(v) for to in v.successors: if not enqueued[to.number]: queue.enqueue(to) enqueued[to.number] = True # ...
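For comparison, a self-contained sketch of the same breadth-first idea in plain Python; the book's `Graph`, `Visitor`, `Array` and `QueueAsLinkedList` classes are not reproduced here, so an adjacency-list dict and a callback stand in for them.

from collections import deque

def breadth_first_traversal(adjacency, start, visit):
    """Visit every vertex reachable from `start` in breadth-first order."""
    enqueued = {start}            # mirrors the book's `enqueued` flag array
    queue = deque([start])
    while queue:
        v = queue.popleft()
        visit(v)
        for successor in adjacency.get(v, ()):
            if successor not in enqueued:
                enqueued.add(successor)
                queue.append(successor)

graph = {0: [1, 2], 1: [3], 2: [3], 3: []}
breadth_first_traversal(graph, 0, print)   # prints 0, 1, 2, 3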
31.666667
69
0.622105
3d6aae6163dc31a2d99568d42a5b0753137b58cd
3,281
py
Python
tests/views/message/test_db_input.py
QueoLda/django-unicorn
01573cd65282c467bfb0925542b180ffa9efba05
[ "MIT" ]
null
null
null
tests/views/message/test_db_input.py
QueoLda/django-unicorn
01573cd65282c467bfb0925542b180ffa9efba05
[ "MIT" ]
null
null
null
tests/views/message/test_db_input.py
QueoLda/django-unicorn
01573cd65282c467bfb0925542b180ffa9efba05
[ "MIT" ]
null
null
null
import time import orjson import pytest import shortuuid from django_unicorn.utils import dicts_equal, generate_checksum from example.coffee.models import Flavor @pytest.mark.django_db def test_message_db_input_update(client): flavor = Flavor(id=1, name="Enzymatic-Flowery") flavor.save() data = {"flavors": [{"pk": flavor.pk, "name": flavor.name}]} message = { "actionQueue": [ { "payload": { "model": "flavors", "db": {"pk": flavor.pk, "name": "flavor"}, "fields": {"name": "Flowery-Floral"}, }, "type": "dbInput", }, {"type": "callMethod", "payload": {"name": "$refresh", "params": []}}, ], "data": data, "checksum": generate_checksum(orjson.dumps(data)), "id": shortuuid.uuid()[:8], "epoch": time.time(), } response = client.post( "/message/tests.views.fake_components.FakeModelComponent", message, content_type="application/json", ) flavor = Flavor.objects.get(id=1) assert flavor.name == "Flowery-Floral" body = orjson.loads(response.content) assert not body["errors"] expected = { "flavors": [ { "pk": 1, "name": "Flowery-Floral", "decimal_value": None, "float_value": None, "label": "", "parent": None, "uuid": str(flavor.uuid), "datetime": None, "date": None, "time": None, "duration": None, } ] } assert dicts_equal(expected, body["data"]) @pytest.mark.django_db def test_message_db_input_create(client): data = {"flavors": []} message = { "actionQueue": [ { "payload": { "model": "flavors", "db": {"pk": "", "name": "flavor"}, "fields": {"name": "Sugar Browning-Nutty"}, }, "type": "dbInput", }, {"type": "callMethod", "payload": {"name": "$refresh", "params": []}}, ], "data": data, "checksum": generate_checksum(orjson.dumps(data)), "id": shortuuid.uuid()[:8], "epoch": time.time(), } assert Flavor.objects.all().count() == 0 response = client.post( "/message/tests.views.fake_components.FakeModelComponent", message, content_type="application/json", ) flavor = Flavor.objects.get(id=1) assert flavor.name == "Sugar Browning-Nutty" body = orjson.loads(response.content) expected = { "flavors": [ { "name": "Sugar Browning-Nutty", "label": "", "parent": None, "float_value": None, "decimal_value": None, "uuid": str(flavor.uuid), "datetime": None, "date": None, "time": None, "duration": None, "pk": 1, } ] } assert not body["errors"] assert dicts_equal(expected, body["data"])
26.674797
82
0.472112
3f229930bcc23798c156957ffa09b8ee41063403
14
py
Python
projects/bcrypt/test.py
quinn-dougherty/python-on-nix
910d3f6554acd4a4ef0425ebccd31104dccb283c
[ "Unlicense" ]
25
2021-10-30T19:54:59.000Z
2022-03-29T06:11:02.000Z
projects/bcrypt/test.py
quinn-dougherty/python-on-nix
910d3f6554acd4a4ef0425ebccd31104dccb283c
[ "Unlicense" ]
21
2021-10-19T01:09:38.000Z
2022-03-24T16:08:53.000Z
projects/bcrypt/test.py
quinn-dougherty/python-on-nix
910d3f6554acd4a4ef0425ebccd31104dccb283c
[ "Unlicense" ]
3
2022-01-25T20:25:13.000Z
2022-03-08T02:58:50.000Z
import bcrypt
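The file above only checks that the package imports; a minimal round trip with the bcrypt API (hashpw/gensalt/checkpw) looks like this.

import bcrypt

password = b"correct horse battery staple"
hashed = bcrypt.hashpw(password, bcrypt.gensalt())   # salted hash, safe to store

assert bcrypt.checkpw(password, hashed)              # correct password verifies
assert not bcrypt.checkpw(b"wrong guess", hashed)    # wrong password does not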
7
13
0.857143
558fa51cc92706593d4d25056d7b2b600a587c81
78,332
py
Python
src/sqlfluff/dialects/dialect_sparksql.py
KulykDmytro/sqlfluff
f0adec2715bc444a897a3df2609ac08fd719a57e
[ "MIT" ]
null
null
null
src/sqlfluff/dialects/dialect_sparksql.py
KulykDmytro/sqlfluff
f0adec2715bc444a897a3df2609ac08fd719a57e
[ "MIT" ]
null
null
null
src/sqlfluff/dialects/dialect_sparksql.py
KulykDmytro/sqlfluff
f0adec2715bc444a897a3df2609ac08fd719a57e
[ "MIT" ]
null
null
null
"""The ANSI Compliant SparkSQL dialect. Inherits from ANSI. Spark SQL ANSI Mode is more restrictive regarding keywords than the Default Mode, and still shares some syntax with hive. Based on: https://spark.apache.org/docs/latest/sql-ref.html https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, CommentSegment, Conditional, Dedent, Delimited, Indent, NamedParser, OneOf, OptionallyBracketed, Ref, RegexLexer, Sequence, StringParser, SymbolSegment, Anything, StartsWith, RegexParser, Matchable, ) from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment from sqlfluff.dialects.dialect_sparksql_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_hive as hive ansi_dialect = load_raw_dialect("ansi") hive_dialect = load_raw_dialect("hive") sparksql_dialect = ansi_dialect.copy_as("sparksql") sparksql_dialect.patch_lexer_matchers( [ # Spark SQL, only -- is used for single-line comment RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": "--"}, ), # == and <=> are valid equal operations # <=> is a non-null equals in Spark SQL # https://spark.apache.org/docs/latest/api/sql/index.html#_10 RegexLexer("equals", r"=|==|<=>", CodeSegment), # identifiers are delimited with ` # within a delimited identifier, ` is used to escape special characters, # including ` # Ex: select `delimited `` with escaped` from `just delimited` # https://spark.apache.org/docs/latest/sql-ref-identifier.html#delimited-identifier RegexLexer("back_quote", r"`([^`]|``)*`", CodeSegment), # Numeric literal matches integers, decimals, and exponential formats. # https://spark.apache.org/docs/latest/sql-ref-literals.html#numeric-literal # Pattern breakdown: # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # 3 distinct groups here: # 1. Obvious fractional types # (can optionally be exponential). # 2. Integer followed by exponential. # These must be fractional types. # 3. Integer only. # These can either be integral or # fractional types. # # (?> 1. # \d+\.\d+ e.g. 123.456 # |\d+\. e.g. 123. # |\.\d+ e.g. .123 # ) # ([eE][+-]?\d+)? Optional exponential. # ([dDfF]|BD|bd)? Fractional data types. # |\d+[eE][+-]?\d+([dDfF]|BD|bd)? 2. Integer + exponential with # fractional data types. # |\d+([dDfFlLsSyY]|BD|bd)? 3. Integer only with integral or # fractional data types. # ) # ( # (?<=\.) If matched character ends with . # (e.g. 123.) then don't worry about # word boundary check. # |(?=\b) Check that we are at word boundary to # avoid matching valid naked identifiers # (e.g. 123column). # ) RegexLexer( "numeric_literal", ( r"(?>(?>\d+\.\d+|\d+\.|\.\d+)([eE][+-]?\d+)?([dDfF]|BD|bd)?" r"|\d+[eE][+-]?\d+([dDfF]|BD|bd)?" 
r"|\d+([dDfFlLsSyY]|BD|bd)?)" r"((?<=\.)|(?=\b))" ), CodeSegment, ), ] ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer("bytes_single_quote", r"X'([^'\\]|\\.)*'", CodeSegment), RegexLexer("bytes_double_quote", r'X"([^"\\]|\\.)*"', CodeSegment), ], before="single_quote", ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer("at_sign_literal", r"@\w*", CodeSegment), ], before="code", ) # Set the bare functions sparksql_dialect.sets("bare_functions").clear() sparksql_dialect.sets("bare_functions").update( [ "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURRENT_USER", ] ) # Set the datetime units sparksql_dialect.sets("datetime_units").clear() sparksql_dialect.sets("datetime_units").update( [ "YEAR", # Alternate syntax for YEAR "YYYY", "YY", "QUARTER", "MONTH", # Alternate syntax for MONTH "MON", "MM", "WEEK", "DAY", # Alternate syntax for DAY "DD", "HOUR", "MINUTE", "SECOND", ] ) # Set Keywords sparksql_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) sparksql_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) # Set Angle Bracket Pairs sparksql_dialect.sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) # Real Segments sparksql_dialect.replace( ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("EqualsSegment_a"), Ref("EqualsSegment_b"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), "HAVING", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ), TemporaryGrammar=Sequence( Sequence("GLOBAL", optional=True), OneOf("TEMP", "TEMPORARY"), ), QuotedLiteralSegment=OneOf( NamedParser("single_quote", CodeSegment, name="quoted_literal", type="literal"), NamedParser("double_quote", CodeSegment, name="quoted_literal", type="literal"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("BytesQuotedLiteralSegment"), ] ), NaturalJoinKeywordsGrammar=Sequence( "NATURAL", Ref("JoinTypeKeywords", optional=True), ), LikeGrammar=OneOf( # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-like.html Sequence( "LIKE", OneOf( "ALL", "ANY", # `SOME` is equivalent to `ANY` "SOME", optional=True, ), ), "RLIKE", "REGEXP", ), SelectClauseSegmentGrammar=Sequence( "SELECT", OneOf( Ref("TransformClauseSegment"), Sequence( Ref( "SelectClauseModifierSegment", optional=True, ), Indent, Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), ), ), # NB: The Dedent for the indent above lives in the # SelectStatementSegment so that it sits in the right # place corresponding to the whitespace. 
), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), ) sparksql_dialect.add( BackQuotedIdentifierSegment=NamedParser( "back_quote", CodeSegment, name="quoted_identifier", type="identifier", trim_chars=("`",), ), BinaryfileKeywordSegment=StringParser( "BINARYFILE", KeywordSegment, name="binary_file", type="file_format", ), JsonfileKeywordSegment=StringParser( "JSONFILE", KeywordSegment, name="json_file", type="file_format", ), RcfileKeywordSegment=StringParser( "RCFILE", KeywordSegment, name="rc_file", type="file_format" ), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, name="sequence_file", type="file_format" ), TextfileKeywordSegment=StringParser( "TEXTFILE", KeywordSegment, name="text_file", type="file_format" ), StartAngleBracketSegment=StringParser( "<", SymbolSegment, name="start_angle_bracket", type="start_angle_bracket" ), EndAngleBracketSegment=StringParser( ">", SymbolSegment, name="end_angle_bracket", type="end_angle_bracket" ), EqualsSegment_a=StringParser( "==", SymbolSegment, name="equals", type="comparison_operator" ), EqualsSegment_b=StringParser( "<=>", SymbolSegment, name="equals", type="comparison_operator" ), FileKeywordSegment=RegexParser( "FILES?", KeywordSegment, name="file", type="file_keyword" ), JarKeywordSegment=RegexParser( "JARS?", KeywordSegment, name="jar", type="file_keyword" ), NoscanKeywordSegment=StringParser( "NOSCAN", KeywordSegment, name="noscan_keyword", type="keyword" ), WhlKeywordSegment=StringParser( "WHL", KeywordSegment, name="whl", type="file_keyword" ), SQLConfPropertiesSegment=Sequence( StringParser("-", SymbolSegment, name="dash", type="dash"), StringParser( "v", SymbolSegment, name="sql_conf_option_set", type="sql_conf_option" ), allow_gaps=False, ), # Add relevant Hive Grammar CommentGrammar=hive_dialect.get_grammar("CommentGrammar"), LocationGrammar=hive_dialect.get_grammar("LocationGrammar"), SerdePropertiesGrammar=hive_dialect.get_grammar("SerdePropertiesGrammar"), StoredAsGrammar=hive_dialect.get_grammar("StoredAsGrammar"), StoredByGrammar=hive_dialect.get_grammar("StoredByGrammar"), StorageFormatGrammar=hive_dialect.get_grammar("StorageFormatGrammar"), TerminatedByGrammar=hive_dialect.get_grammar("TerminatedByGrammar"), # Add Spark Grammar PropertyGrammar=Sequence( Ref("PropertyNameSegment"), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("SingleIdentifierGrammar"), ), ), PropertyNameListGrammar=Delimited(Ref("PropertyNameSegment")), BracketedPropertyNameListGrammar=Bracketed(Ref("PropertyNameListGrammar")), PropertyListGrammar=Delimited(Ref("PropertyGrammar")), BracketedPropertyListGrammar=Bracketed(Ref("PropertyListGrammar")), OptionsGrammar=Sequence("OPTIONS", Ref("BracketedPropertyListGrammar")), BucketSpecGrammar=Sequence( Ref("ClusteredBySpecGrammar"), Ref("SortedBySpecGrammar", optional=True), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", ), ClusteredBySpecGrammar=Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), ), DatabasePropertiesGrammar=Sequence( "DBPROPERTIES", Ref("BracketedPropertyListGrammar") ), DataSourcesV2FileTypeGrammar=OneOf( # https://github.com/apache/spark/tree/master/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2 # noqa: E501 # Separated here because these allow for additional # commands such as Select From File # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # Spark 
Core Data Sources # https://spark.apache.org/docs/latest/sql-data-sources.html "AVRO", "CSV", "JSON", "PARQUET", "ORC", # Separated here because these allow for additional commands # Similar to DataSourcesV2 "DELTA", # https://github.com/delta-io/delta "CSV", "TEXT", "BINARYFILE", ), FileFormatGrammar=OneOf( Ref("DataSourcesV2FileTypeGrammar"), "SEQUENCEFILE", "TEXTFILE", "RCFILE", "JSONFILE", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), DataSourceFormatGrammar=OneOf( Ref("FileFormatGrammar"), # NB: JDBC is part of DataSourceV2 but not included # there since there are no significant syntax changes "JDBC", ), TimestampAsOfGrammar=Sequence( "TIMESTAMP", "AS", "OF", OneOf( Ref("QuotedLiteralSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ), ), VersionAsOfGrammar=Sequence( "VERSION", "AS", "OF", Ref("NumericLiteralSegment"), ), # Adding Hint related segments so they are not treated as generic comments # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html StartHintSegment=StringParser("/*+", KeywordSegment, name="start_hint"), EndHintSegment=StringParser("*/", KeywordSegment, name="end_hint"), PartitionSpecGrammar=Sequence( OneOf("PARTITION", Sequence("PARTITIONED", "BY")), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), ), ), ), # NB: Redefined from `NakedIdentifierSegment` which uses an anti-template to # not match keywords; however, SparkSQL allows keywords to be used in table # and runtime properties. PropertiesNakedIdentifierSegment=RegexParser( r"[A-Z0-9]*[A-Z][A-Z0-9]*", CodeSegment, name="properties_naked_identifier", type="identifier", ), ResourceFileGrammar=OneOf( Ref("JarKeywordSegment"), Ref("WhlKeywordSegment"), Ref("FileKeywordSegment"), ), ResourceLocationGrammar=Sequence( "USING", Ref("ResourceFileGrammar"), Ref("QuotedLiteralSegment"), ), SortedBySpecGrammar=Sequence( "SORTED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ) ) ), optional=True, ), UnsetTablePropertiesGrammar=Sequence( "UNSET", "TBLPROPERTIES", Ref("IfExistsGrammar", optional=True), Ref("BracketedPropertyNameListGrammar"), ), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), BytesQuotedLiteralSegment=OneOf( NamedParser( "bytes_single_quote", CodeSegment, name="bytes_quoted_literal", type="literal", ), NamedParser( "bytes_double_quote", CodeSegment, name="bytes_quoted_literal", type="literal", ), ), JoinTypeKeywords=OneOf( "CROSS", "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), Sequence( Ref.keyword("LEFT", optional=True), "SEMI", ), Sequence( Ref.keyword("LEFT", optional=True), "ANTI", ), ), AtSignLiteralSegment=NamedParser( "at_sign_literal", CodeSegment, name="at_sign_literal", type="literal", trim_chars="@", ), ) # Adding Hint related grammar before comment `block_comment` and # `single_quote` so they are applied before comment lexer so # hints are treated as such instead of comments when parsing. 
# https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html sparksql_dialect.insert_lexer_matchers( [ RegexLexer("start_hint", r"\/\*\+", CodeSegment), ], before="block_comment", ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer("end_hint", r"\*\/", CodeSegment), ], before="single_quote", ) # Hive Segments class RowFormatClauseSegment(hive.RowFormatClauseSegment): """`ROW FORMAT` clause in a CREATE HIVEFORMAT TABLE statement.""" pass class SkewedByClauseSegment(hive.SkewedByClauseSegment): """`SKEWED BY` clause in a CREATE HIVEFORMAT TABLE statement.""" pass # Primitive Data Types class PrimitiveTypeSegment(BaseSegment): """Spark SQL Primitive data types. https://spark.apache.org/docs/latest/sql-ref-datatypes.html """ type = "primitive_type" match_grammar = OneOf( "BOOLEAN", # TODO : not currently supported; add segment - see NumericLiteralSegment # "BYTE", "TINYINT", # TODO : not currently supported; add segment - see NumericLiteralSegment # "SHORT", "SMALLINT", "INT", "BIGINT", "FLOAT", "REAL", "DOUBLE", "DATE", "TIMESTAMP", "STRING", Sequence( OneOf("CHAR", "CHARACTER", "VARCHAR"), Bracketed(Ref("NumericLiteralSegment"), optional=True), ), "BINARY", Sequence( OneOf("DECIMAL", "DEC", "NUMERIC"), Bracketed( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), optional=True, ), ), "INTERVAL", ) class DatatypeSegment(PrimitiveTypeSegment): """Spark SQL Data types. https://spark.apache.org/docs/latest/sql-ref-datatypes.html """ type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Sequence( "ARRAY", Bracketed( Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), Sequence( "MAP", Bracketed( Sequence( Ref("PrimitiveTypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), Sequence( "STRUCT", Bracketed( # CommentGrammar here is valid Spark SQL # even though its not stored in Sparks Catalog Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), ) # Data Definition Statements # http://spark.apache.org/docs/latest/sql-ref-syntax-ddl.html class AlterDatabaseStatementSegment(BaseSegment): """An `ALTER DATABASE/SCHEMA` statement. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-database.html """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), "SET", Ref("DatabasePropertiesGrammar"), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """A `ALTER TABLE` statement to change the table schema or properties. 
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-table.html """ match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( # ALTER TABLE - RENAME TO `table_identifier` Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # ALTER TABLE - RENAME `partition_spec` Sequence( Ref("PartitionSpecGrammar"), "RENAME", "TO", Ref("PartitionSpecGrammar"), ), # ALTER TABLE - ADD COLUMNS Sequence( "ADD", "COLUMNS", Indent, OptionallyBracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), OneOf( "FIRST", Sequence( "AFTER", Ref("ColumnReferenceSegment"), ), optional=True, ), ), ), ), Dedent, ), # ALTER TABLE - ALTER OR CHANGE COLUMN Sequence( OneOf("ALTER", "CHANGE"), Ref.keyword("COLUMN", optional=True), Indent, AnyNumberOf( Ref( "ColumnReferenceSegment", exclude=OneOf( "COMMENT", "TYPE", Ref("DatatypeSegment"), "FIRST", "AFTER", "SET", ), ), max_times=2, ), Ref.keyword("TYPE", optional=True), Ref("DatatypeSegment", optional=True), Ref("CommentGrammar", optional=True), OneOf( "FIRST", Sequence( "AFTER", Ref("ColumnReferenceSegment"), ), optional=True, ), Sequence(OneOf("SET", "DROP"), "NOT NULL", optional=True), Dedent, ), # ALTER TABLE - REPLACE COLUMNS Sequence( "REPLACE", "COLUMNS", Bracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), ), ), # ALTER TABLE - ADD PARTITION Sequence( "ADD", Ref("IfNotExistsGrammar", optional=True), AnyNumberOf(Ref("PartitionSpecGrammar")), ), # ALTER TABLE - DROP PARTITION Sequence( "DROP", Ref("IfExistsGrammar", optional=True), Ref("PartitionSpecGrammar"), Sequence("PURGE", optional=True), ), # ALTER TABLE - REPAIR PARTITION Sequence("RECOVER", "PARTITIONS"), # ALTER TABLE - SET PROPERTIES Sequence("SET", Ref("TablePropertiesGrammar")), # ALTER TABLE - UNSET PROPERTIES Ref("UnsetTablePropertiesGrammar"), # ALTER TABLE - SET SERDE Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", OneOf( Sequence( "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar"), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ), # ALTER TABLE - SET FILE FORMAT Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", "FILEFORMAT", Ref("DataSourceFormatGrammar"), ), # ALTER TABLE - CHANGE FILE LOCATION Sequence( Ref("PartitionSpecGrammar"), "SET", Ref("LocationGrammar"), ), ), ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement to change the view schema or properties. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence("SET", Ref("TablePropertiesGrammar")), Ref("UnsetTablePropertiesGrammar"), Sequence( "AS", OptionallyBracketed(Ref("SelectStatementSegment")), ), ), ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-database.html """ match_grammar = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("CommentGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence( "WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True ), ) class CreateFunctionStatementSegment(ansi.CreateFunctionStatementSegment): """A `CREATE FUNCTION` statement. 
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-function.html """ match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Anything(), ) parse_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameIdentifierSegment"), "AS", Ref("QuotedLiteralSegment"), Ref("ResourceLocationGrammar", optional=True), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement using a Data Source or Like. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-datasource.html https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-like.html https://docs.delta.io/latest/delta-batch.html#create-a-table """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( # Columns and comment syntax: Bracketed( Delimited( Sequence( OneOf( Ref("ColumnDefinitionSegment"), Ref("GeneratedColumnDefinitionSegment"), ), Ref("CommentGrammar", optional=True), ), ), ), # Like Syntax Sequence( "LIKE", OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ), optional=True, ), Sequence("USING", Ref("DataSourceFormatGrammar"), optional=True), Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), Ref("OptionsGrammar", optional=True), Ref("PartitionSpecGrammar", optional=True), Ref("BucketSpecGrammar", optional=True), AnyNumberOf( Ref("LocationGrammar", optional=True), Ref("CommentGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ) class CreateHiveFormatTableStatementSegment(hive.CreateTableStatementSegment): """A `CREATE TABLE` statement using Hive format. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-hiveformat.html """ pass class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement. https://spark.apache.org/docs/3.0.0/sql-ref-syntax-ddl-create-view.html#syntax """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns and comment syntax: Sequence( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Ref("CommentGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` STATEMENT. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-function.html """ type = "drop_function_statement" match_grammar = Sequence( "DROP", Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class MsckRepairTableStatementSegment(hive.MsckRepairTableStatementSegment): """A `REPAIR TABLE` statement using Hive MSCK (Metastore Check) format. This class inherits from Hive since Spark leverages Hive format for this command and is dependent on the Hive metastore. 
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-repair-table.html """ pass class TruncateStatementSegment(ansi.TruncateStatementSegment): """A `TRUNCATE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-truncate-table.html """ match_grammar = Sequence( "TRUNCATE", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) class UseDatabaseStatementSegment(BaseSegment): """A `USE DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-usedb.html """ type = "use_database_statement" match_grammar = Sequence( "USE", Ref("DatabaseReferenceSegment"), ) # Data Manipulation Statements class InsertStatementSegment(BaseSegment): """A `INSERT [TABLE]` statement to insert or overwrite new rows into a table. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-into.html https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-table.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf("INTO", "OVERWRITE"), Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ), Sequence( "FROM", Ref("TableReferenceSegment"), "SELECT", Delimited( Ref("ColumnReferenceSegment"), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), ), ) class InsertOverwriteDirectorySegment(BaseSegment): """An `INSERT OVERWRITE [LOCAL] DIRECTORY` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-directory.html """ type = "insert_overwrite_directory_statement" match_grammar = Sequence( "INSERT", "OVERWRITE", Ref.keyword("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment", optional=True), "USING", Ref("DataSourceFormatGrammar"), Ref("OptionsGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), ), ) class InsertOverwriteDirectoryHiveFmtSegment(BaseSegment): """An `INSERT OVERWRITE [LOCAL] DIRECTORY` statement in Hive format. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-directory-hive.html """ type = "insert_overwrite_directory_hive_fmt_statement" match_grammar = Sequence( "INSERT", "OVERWRITE", Ref.keyword("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment"), Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), ), ) class LoadDataSegment(BaseSegment): """A `LOAD DATA` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-load.html """ type = "load_data_statement" match_grammar = Sequence( "LOAD", "DATA", Ref.keyword("LOCAL", optional=True), "INPATH", Ref("QuotedLiteralSegment"), Ref.keyword("OVERWRITE", optional=True), "INTO", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) # Data Retrieval Statements class ClusterByClauseSegment(BaseSegment): """A `CLUSTER BY` clause from `SELECT` statement. Equivalent to `DISTRIBUTE BY` and `SORT BY` in tandem. This clause is mutually exclusive with SORT BY, ORDER BY and DISTRIBUTE BY. 
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-clusterby.html """ type = "cluster_by_clause" match_grammar = StartsWith( Sequence("CLUSTER", "BY"), terminator=OneOf( "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ), ) parse_grammar = Sequence( "CLUSTER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `CLUSTER BY 1` Ref("NumericLiteralSegment"), # Can cluster by an expression Ref("ExpressionSegment"), ), ), terminator=OneOf( "WINDOW", "LIMIT", Ref("FrameClauseUnitGrammar"), ), ), Dedent, ) class DistributeByClauseSegment(BaseSegment): """A `DISTRIBUTE BY` clause from `SELECT` statement. This clause is mutually exclusive with SORT BY, ORDER BY and DISTRIBUTE BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-distribute-by.html """ type = "distribute_by_clause" match_grammar = StartsWith( Sequence("DISTRIBUTE", "BY"), terminator=OneOf( "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ), ) parse_grammar = Sequence( "DISTRIBUTE", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `DISTRIBUTE BY 1` Ref("NumericLiteralSegment"), # Can distribute by an expression Ref("ExpressionSegment"), ), ), terminator=OneOf( "WINDOW", "LIMIT", Ref("FrameClauseUnitGrammar"), ), ), Dedent, ) class HintFunctionSegment(BaseSegment): """A Function within a SparkSQL Hint. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ type = "hint_function" match_grammar = Sequence( Ref("FunctionNameSegment"), Bracketed( Delimited( AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("NumericLiteralSegment"), min_times=1, ), ), # May be Bare Function unique to Hints, i.e. REBALANCE optional=True, ), ) class SelectHintSegment(BaseSegment): """Spark Select Hints. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ type = "select_hint" match_grammar = Sequence( Sequence( Ref("StartHintSegment"), Delimited( AnyNumberOf( Ref("HintFunctionSegment"), # At least function should be supplied min_times=1, ), terminator=Ref("EndHintSegment"), ), Ref("EndHintSegment"), ), ) class LimitClauseSegment(ansi.LimitClauseSegment): """A `LIMIT` clause like in `SELECT`. Enhanced from ANSI dialect. :: Spark does not allow explicit or implicit `OFFSET` (implicit being 1000, 20 for example) :: Spark allows an `ALL` quantifier or a function expression as an input to `LIMIT` https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-limit.html """ match_grammar = Sequence( "LIMIT", Indent, OneOf( Ref("NumericLiteralSegment"), "ALL", Ref("FunctionSegment"), ), Dedent, ) class SetOperatorSegment(ansi.SetOperatorSegment): """A set operator such as Union, Minus, Except or Intersect. Enhanced from ANSI dialect. :: Spark allows the `ALL` keyword to follow Except and Minus. :: Distinct allows the `DISTINCT` and `ALL` keywords. # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-setops.html """ match_grammar = OneOf( Sequence( OneOf("EXCEPT", "MINUS"), Ref.keyword("ALL", optional=True), ), Sequence( OneOf("UNION", "INTERSECT"), OneOf("DISTINCT", "ALL", optional=True), ), ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns. 
Enhance `SelectClauseModifierSegment` from Ansi to allow SparkSQL Hints https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ match_grammar = Sequence( # TODO New Rule warning of Join Hints priority if multiple specified # When different join strategy hints are specified on # both sides of a join, Spark prioritizes the BROADCAST # hint over the MERGE hint over the SHUFFLE_HASH hint # over the SHUFFLE_REPLICATE_NL hint. # # Spark will issue Warning in the following example: # # SELECT # /*+ BROADCAST(t1), MERGE(t1, t2) */ # t1.a, # t1.b, # t2.c # FROM t1 INNER JOIN t2 ON t1.key = t2.key; # # Hints should be listed in order of priority in Select Ref("SelectHintSegment", optional=True), OneOf("DISTINCT", "ALL", optional=True), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Enhance unordered `SELECT` statement for valid SparkSQL clauses. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment. """ match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar parse_grammar = ansi.UnorderedSelectStatementSegment.parse_grammar.copy( # Removing non-valid clauses that exist in ANSI dialect remove=[Ref("OverlapsClauseSegment", optional=True)] ) class SelectStatementSegment(ansi.SelectStatementSegment): """Enhance `SELECT` statement for valid SparkSQL clauses.""" match_grammar = ansi.SelectStatementSegment.match_grammar parse_grammar = ansi.SelectStatementSegment.parse_grammar.copy( # TODO New Rule: Warn of mutual exclusion of following clauses # DISTRIBUTE, SORT, CLUSTER and ORDER BY if multiple specified insert=[ Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment"), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """Enhance `GROUP BY` clause like in `SELECT` for 'CUBE' and 'ROLLUP`. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ match_grammar = StartsWith( Sequence("GROUP", "BY"), terminator=OneOf("ORDER", "LIMIT", "HAVING", "WINDOW"), enforce_whitespace_preceding_terminator=True, ) parse_grammar = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), ), terminator=OneOf("ORDER", "LIMIT", "HAVING", "WINDOW"), ), # TODO: New Rule # Warn if CubeRollupClauseSegment and # WithCubeRollupClauseSegment used in same query Ref("WithCubeRollupClauseSegment", optional=True), Dedent, ) class WithCubeRollupClauseSegment(BaseSegment): """A `[WITH CUBE | WITH ROLLUP]` clause after the `GROUP BY` clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ type = "with_cube_rollup_clause" match_grammar = Sequence( "WITH", OneOf("CUBE", "ROLLUP"), ) class CubeRollupClauseSegment(BaseSegment): """`[CUBE | ROLLUP]` clause within the `GROUP BY` clause. 
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ type = "cube_rollup_clause" match_grammar = StartsWith( OneOf("CUBE", "ROLLUP"), terminator=OneOf( "HAVING", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), ), ) parse_grammar = Sequence( OneOf("CUBE", "ROLLUP"), Bracketed( Ref("GroupingExpressionList"), ), ) class GroupingSetsClauseSegment(BaseSegment): """`GROUPING SETS` clause within the `GROUP BY` clause.""" type = "grouping_sets_clause" match_grammar = StartsWith( Sequence("GROUPING", "SETS"), terminator=OneOf( "HAVING", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), ), ) parse_grammar = Sequence( "GROUPING", "SETS", Bracketed( Delimited( Ref("CubeRollupClauseSegment"), Ref("GroupingExpressionList"), Bracketed(), # Allows empty parentheses ) ), ) class GroupingExpressionList(BaseSegment): """Grouping expression list within `CUBE` / `ROLLUP` `GROUPING SETS`.""" type = "grouping_expression_list" match_grammar = Delimited( OneOf( Bracketed(Delimited(Ref("ExpressionSegment"))), Ref("ExpressionSegment"), ) ) class SortByClauseSegment(BaseSegment): """A `SORT BY` clause like in `SELECT`. This clause is mutually exclusive with SORT BY, ORDER BY and DISTRIBUTE BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-sortby.html """ type = "sort_by_clause" match_grammar = StartsWith( Sequence("SORT", "BY"), terminator=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ), ) parse_grammar = Sequence( "SORT", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `ORDER BY 1` Ref("NumericLiteralSegment"), # Can order by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), # NB: This isn't really ANSI, and isn't supported in Mysql, # but is supported in enough other dialects for it to make # sense here for now. Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminator=OneOf( "LIMIT", Ref("FrameClauseUnitGrammar"), ), ), Dedent, ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """A `TABLESAMPLE` clause following a table identifier. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-sampling.html """ match_grammar = Sequence( "TABLESAMPLE", OneOf( Bracketed( Ref("NumericLiteralSegment"), OneOf( "PERCENT", "ROWS", ), ), Bracketed( "BUCKET", Ref("NumericLiteralSegment"), "OUT", "OF", Ref("NumericLiteralSegment"), ), ), ) class LateralViewClauseSegment(BaseSegment): """A `LATERAL VIEW` like in a `FROM` clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-lateral-view.html """ type = "lateral_view_clause" match_grammar = Sequence( Indent, "LATERAL", "VIEW", Ref.keyword("OUTER", optional=True), Ref("FunctionSegment"), # NB: AliasExpressionSegment is not used here for table # or column alias because `AS` is optional within it # (and in most scenarios). Here it's explicitly defined # for when it is required and not allowed. Ref("SingleIdentifierGrammar", optional=True), Sequence( "AS", Delimited( Ref("SingleIdentifierGrammar"), ), ), Dedent, ) class OverClauseSegment(ansi.OverClauseSegment): """An OVER clause for window functions. 
Enhance from ANSI dialect to allow for specification of [{IGNORE | RESPECT} NULLS] https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-window.html """ match_grammar = Sequence( Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), "OVER", OneOf( Ref("SingleIdentifierGrammar"), # Window name Bracketed( Ref("WindowSpecificationSegment", optional=True), ), ), ) class PivotClauseSegment(BaseSegment): """A `PIVOT` clause as using in FROM clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-pivot.html """ type = "pivot_clause" match_grammar = Sequence( Indent, "PIVOT", Bracketed( Indent, Delimited( Sequence( Ref("FunctionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), "FOR", OptionallyBracketed( OneOf( Ref("SingleIdentifierGrammar"), Delimited( Ref("SingleIdentifierGrammar"), ), ), ), "IN", Bracketed( Delimited( Sequence( OneOf( Bracketed( Delimited( Ref("ExpressionSegment"), ephemeral_name="ValuesClauseElements", ) ), Delimited( Ref("ExpressionSegment"), ), ), Ref("AliasExpressionSegment", optional=True), ), ), ), Dedent, ), Dedent, ) class TransformClauseSegment(BaseSegment): """A `TRANSFORM` clause like used in `SELECT`. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-transform.html """ type = "transform_clause" match_grammar = Sequence( "TRANSFORM", Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ephemeral_name="TransformClauseContents", ), ), Indent, Ref("RowFormatClauseSegment", optional=True), "USING", Ref("QuotedLiteralSegment"), Sequence( "AS", Bracketed( Delimited( AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ), ), ), optional=True, ), Ref("RowFormatClauseSegment", optional=True), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. Enhanced from ANSI dialect to allow for additonal parameters. EXPLAIN [ EXTENDED | CODEGEN | COST | FORMATTED ] explainable_stmt https://spark.apache.org/docs/latest/sql-ref-syntax-qry-explain.html """ explainable_stmt = Ref("StatementSegment") match_grammar = Sequence( "EXPLAIN", OneOf( "EXTENDED", "CODEGEN", "COST", "FORMATTED", optional=True, ), explainable_stmt, ) # Auxiliary Statements class AddFileSegment(BaseSegment): """A `ADD {FILE | FILES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-file.html """ type = "add_file_statement" match_grammar = Sequence( "ADD", Ref("FileKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class AddJarSegment(BaseSegment): """A `ADD {JAR | JARS}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html """ type = "add_jar_statement" match_grammar = Sequence( "ADD", Ref("JarKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class AnalyzeTableSegment(BaseSegment): """An `ANALYZE {TABLE | TABLES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-analyze-table.html """ type = "analyze_table_statement" match_grammar = Sequence( "ANALYZE", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref( "PartitionSpecGrammar", optional=True, ), "COMPUTE", "STATISTICS", OneOf( "NOSCAN", Sequence( "FOR", "COLUMNS", OptionallyBracketed( Delimited( Ref( "ColumnReferenceSegment", ), ), ), ), optional=True, ), ), Sequence( "TABLES", Sequence( OneOf( "FROM", "IN", ), Ref( "DatabaseReferenceSegment", ), optional=True, ), "COMPUTE", "STATISTICS", Ref.keyword( "NOSCAN", optional=True, ), ), ), ) class CacheTableSegment(BaseSegment): """A `CACHE TABLE` statement. 
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-cache-table.html """ type = "cache_table" match_grammar = Sequence( "CACHE", Ref.keyword("LAZY", optional=True), "TABLE", Ref("TableReferenceSegment"), Ref("OptionsGrammar", optional=True), Ref.keyword("AS", optional=True), Ref("SelectableGrammar"), ) class ClearCacheSegment(BaseSegment): """A `CLEAR CACHE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-clear-cache.html """ type = "clear_cache" match_grammar = Sequence( "CLEAR", "CACHE", ) class DescribeStatementSegment(BaseSegment): """A `DESCRIBE` statement. This class provides coverage for databases, tables, functions, and queries. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one describe vs another, but they could be broken out to one class per describe statement type. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-database.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-function.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-query.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-table.html """ type = "describe_statement" match_grammar = Sequence( OneOf("DESCRIBE", "DESC"), OneOf( Sequence( "DATABASE", Ref.keyword("EXTENDED", optional=True), Ref("DatabaseReferenceSegment"), ), Sequence( "FUNCTION", Ref.keyword("EXTENDED", optional=True), Ref("FunctionNameSegment"), ), Sequence( Ref.keyword("TABLE", optional=True), Ref.keyword("EXTENDED", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), # can be fully qualified column after table is listed # [database.][table.][column] Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), allow_gaps=False, ), max_times=2, allow_gaps=False, ), optional=True, allow_gaps=False, ), ), Sequence( Ref.keyword("QUERY", optional=True), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "FROM", Ref("TableReferenceSegment"), "SELECT", Delimited( Ref("ColumnReferenceSegment"), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), Ref("StatementSegment"), ), ), ), ) class ListFileSegment(BaseSegment): """A `LIST {FILE | FILES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-list-file.html """ type = "list_file_statement" match_grammar = Sequence( "LIST", Ref("FileKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class ListJarSegment(BaseSegment): """A `ADD {JAR | JARS}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html """ type = "list_jar_statement" match_grammar = Sequence( "LIST", Ref("JarKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class RefreshStatementSegment(BaseSegment): """A `REFRESH` statement for given data source path. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one refresh vs another, but they could be broken out to one class per refresh statement type. 
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh-function.html """ type = "refresh_statement" match_grammar = Sequence( "REFRESH", OneOf( Ref("QuotedLiteralSegment"), Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ), Sequence( "FUNCTION", Ref("FunctionNameSegment"), ), ), ) class ResetStatementSegment(BaseSegment): """A `RESET` statement used to reset runtime configurations. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-reset.html """ type = "reset_statement" match_grammar = Sequence( "RESET", Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("DotSegment"), optional=True, ), ) class SetStatementSegment(BaseSegment): """A `SET` statement used to set runtime properties. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-set.html """ type = "set_statement" match_grammar = Sequence( "SET", Ref("SQLConfPropertiesSegment", optional=True), OneOf( Ref("PropertyListGrammar"), Ref("PropertyNameSegment"), optional=True, ), ) class ShowStatement(BaseSegment): """Common class for `SHOW` statements. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one show vs another, but they could be broken out to one class per show statement type. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-columns.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-create-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-databases.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-functions.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-partitions.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-tables.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-tblproperties.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-views.html """ type = "show_statement" match_grammar = Sequence( "SHOW", OneOf( # SHOW CREATE TABLE Sequence( "CREATE", "TABLE", Ref("TableExpressionSegment"), Sequence( "AS", "SERDE", optional=True, ), ), # SHOW COLUMNS Sequence( "COLUMNS", "IN", Ref("TableExpressionSegment"), Sequence( "IN", Ref("DatabaseReferenceSegment"), optional=True, ), ), # SHOW { DATABASES | SCHEMAS } Sequence( OneOf("DATABASES", "SCHEMAS"), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), # SHOW FUNCTIONS Sequence( OneOf("USER", "SYSTEM", "ALL", optional=True), "FUNCTIONS", OneOf( # qualified function from a database Sequence( Ref("DatabaseReferenceSegment"), Ref("DotSegment"), Ref("FunctionNameSegment"), allow_gaps=False, optional=True, ), # non-qualified function Ref("FunctionNameSegment", optional=True), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ), # SHOW PARTITIONS Sequence( "PARTITIONS", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ), # SHOW TABLE Sequence( "TABLE", "EXTENDED", Sequence( OneOf("IN", "FROM"), Ref("DatabaseReferenceSegment"), optional=True, ), "LIKE", Ref("QuotedLiteralSegment"), Ref("PartitionSpecGrammar", optional=True), ), # SHOW TABLES Sequence( "TABLES", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( "LIKE", Ref("QuotedLiteralSegment"), 
optional=True, ), ), # SHOW TBLPROPERTIES Sequence( "TBLPROPERTIES", Ref("TableReferenceSegment"), Ref("BracketedPropertyNameListGrammar", optional=True), ), # SHOW VIEWS Sequence( "VIEWS", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ), ) class UncacheTableSegment(BaseSegment): """AN `UNCACHE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-uncache-table.html """ type = "uncache_table" match_grammar = Sequence( "UNCACHE", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar parse_grammar = ansi.StatementSegment.parse_grammar.copy( # Segments defined in Spark3 dialect insert=[ # Data Definition Statements Ref("AlterDatabaseStatementSegment"), Ref("AlterTableStatementSegment"), Ref("AlterViewStatementSegment"), Ref("CreateHiveFormatTableStatementSegment"), Ref("DropFunctionStatementSegment"), Ref("MsckRepairTableStatementSegment"), Ref("UseDatabaseStatementSegment"), # Auxiliary Statements Ref("AddFileSegment"), Ref("AddJarSegment"), Ref("AnalyzeTableSegment"), Ref("CacheTableSegment"), Ref("ClearCacheSegment"), Ref("ListFileSegment"), Ref("ListJarSegment"), Ref("RefreshStatementSegment"), Ref("ResetStatementSegment"), Ref("SetStatementSegment"), Ref("ShowStatement"), Ref("UncacheTableSegment"), # Data Manipulation Statements Ref("InsertOverwriteDirectorySegment"), Ref("InsertOverwriteDirectoryHiveFmtSegment"), Ref("LoadDataSegment"), # Data Retrieval Statements Ref("ClusterByClauseSegment"), Ref("DistributeByClauseSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateExtensionStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class JoinClauseSegment(ansi.JoinClauseSegment): """Any number of join clauses, including the `JOIN` keyword. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-join.html """ match_grammar = OneOf( # NB These qualifiers are optional # TODO: Allow nested joins like: # ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON # tab1.col1 = tab2.col1 Sequence( Ref("JoinTypeKeywords", optional=True), Ref("JoinKeywordsGrammar"), Indent, Sequence( Ref("FromExpressionElementSegment"), Conditional(Dedent, indented_using_on=False), # NB: this is optional OneOf( # ON clause Ref("JoinOnConditionSegment"), # USING clause Sequence( "USING", Indent, Bracketed( # NB: We don't use BracketedColumnReferenceListGrammar # here because we're just using SingleIdentifierGrammar, # rather than ObjectReferenceSegment or # ColumnReferenceSegment. This is a) so that we don't # lint it as a reference and b) because the column will # probably be returned anyway during parsing. Delimited( Ref("SingleIdentifierGrammar"), ephemeral_name="UsingClauseContents", ) ), Dedent, ), # Unqualified joins *are* allowed. They just might not # be a good idea. optional=True, ), Conditional(Indent, indented_using_on=False), ), Dedent, ), # Note NATURAL joins do not support Join conditions Sequence( Ref("NaturalJoinKeywordsGrammar"), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. 
The optional AS keyword allows both implicit and explicit aliasing. Note also that it's possible to specify just column aliases without aliasing the table as well: .. code-block:: sql SELECT * FROM VALUES (1,2) as t (a, b); SELECT * FROM VALUES (1,2) as (a, b); SELECT * FROM VALUES (1,2) as t; Note that in Spark SQL, identifiers are quoted using backticks (`my_table`) rather than double quotes ("my_table"). Quoted identifiers are allowed in aliases, but unlike ANSI which allows single quoted identifiers ('my_table') in aliases, this is not allowed in Spark and so the definition of this segment must depart from ANSI. """ match_grammar = Sequence( Ref.keyword("AS", optional=True), OneOf( # maybe table alias and column aliases Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed(Ref("SingleIdentifierListSegment")), ), # just a table alias Ref("SingleIdentifierGrammar"), exclude=OneOf( "LATERAL", Ref("JoinTypeKeywords"), "WINDOW", "PIVOT", ), ), ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause, as typically used with `INSERT` or `SELECT`. The Spark SQL reference does not mention `VALUES` clauses except in the context of `INSERT` statements. However, they appear to behave much the same as in `postgres <https://www.postgresql.org/docs/14/sql-values.html>`. In short, they can appear anywhere a `SELECT` can, and also as bare `VALUES` statements. Here are some examples: .. code-block:: sql VALUES 1,2 LIMIT 1; SELECT * FROM VALUES (1,2) as t (a,b); SELECT * FROM (VALUES (1,2) as t (a,b)); WITH a AS (VALUES 1,2) SELECT * FROM a; """ match_grammar = Sequence( "VALUES", Delimited( OneOf( Bracketed( Delimited( # NULL keyword used in # INSERT INTO statement. "NULL", Ref("ExpressionSegment"), ephemeral_name="ValuesClauseElements", ) ), "NULL", Ref("ExpressionSegment"), exclude=OneOf("VALUES"), ), ), # LIMIT/ORDER are unreserved in sparksql. Ref( "AliasExpressionSegment", exclude=OneOf("LIMIT", "ORDER"), optional=True, ), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Enhance to allow for additional clauses allowed in Spark and Delta Lake. """ match_grammar = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Sequence( OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( Ref("AtSignLiteralSegment"), Sequence( Indent, OneOf( Ref("TimestampAsOfGrammar"), Ref("VersionAsOfGrammar"), ), Dedent, ), optional=True, ), ), # Nested Selects Bracketed(Ref("SelectableGrammar")), ) class FileReferenceSegment(BaseSegment): """A reference to a file for direct query. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html """ type = "file_reference" match_grammar = Sequence( Ref("DataSourcesV2FileTypeGrammar"), Ref("DotSegment"), # NB: Using `QuotedLiteralSegment` here causes `FileReferenceSegment` # to match as a `TableReferenceSegment` Ref("BackQuotedIdentifierSegment"), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression. 
Enhanced from ANSI to allow for `LATERAL VIEW` clause """ match_grammar = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref( "AliasExpressionSegment", exclude=Ref("SamplingExpressionSegment"), optional=True, ), Ref("SamplingExpressionSegment", optional=True), # NB: `LateralViewClauseSegment`, `NamedWindowSegment`, # and `PivotClauseSegment should come after Alias/Sampling # expressions so those are matched before AnyNumberOf(Ref("LateralViewClauseSegment")), Ref("NamedWindowSegment", optional=True), Ref("PivotClauseSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) class PropertyNameSegment(BaseSegment): """A segment for a property name to set and retrieve table and runtime properties. https://spark.apache.org/docs/latest/configuration.html#application-properties """ type = "property_name_identifier" match_grammar = Sequence( OneOf( Delimited( Ref("PropertiesNakedIdentifierSegment"), delimiter=Ref("DotSegment"), allow_gaps=False, ), Ref("SingleIdentifierGrammar"), ), ) class GeneratedColumnDefinitionSegment(BaseSegment): """A generated column definition, e.g. for CREATE TABLE or ALTER TABLE. https://docs.delta.io/latest/delta-batch.html#use-generated-columns """ type = "generated_column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) Sequence( "GENERATED", "ALWAYS", "AS", Bracketed( OneOf( Ref("FunctionSegment"), Ref("BareFunctionSegment"), ), ), ), AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class MergeUpdateClauseSegment(ansi.MergeUpdateClauseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar: Matchable = Sequence( "UPDATE", OneOf( Sequence("SET", Ref("WildcardIdentifierSegment")), Sequence( Indent, Ref("SetClauseListSegment"), Dedent, ), ), ) class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar: Matchable = Sequence( "INSERT", OneOf( Ref("WildcardIdentifierSegment"), Sequence( Indent, Ref("BracketedColumnReferenceListGrammar"), Dedent, Indent, Ref("ValuesClauseSegment"), Dedent, ), ), )
30.70639
137
0.534277
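The dialect segments above (select hints, GROUP BY ... WITH ROLLUP, and the enhanced join grammar) can be exercised end to end through sqlfluff's simple API. The sketch below is illustrative only: it assumes sqlfluff is installed and that this dialect is registered under the name "sparksql" (older releases register it as "spark3").

import sqlfluff

# Query built only from constructs the dialect above defines: a select hint,
# an INNER JOIN with an ON condition, and GROUP BY ... WITH ROLLUP.
query = (
    "SELECT /*+ BROADCAST(t1) */ t1.a, t2.b, count(t2.c) "
    "FROM t1 INNER JOIN t2 ON t1.key = t2.key "
    "GROUP BY t1.a, t2.b WITH ROLLUP"
)

# parse() raises if the SQL cannot be parsed with the chosen dialect;
# lint() returns a list of rule violations (possibly empty).
parse_tree = sqlfluff.parse(query, dialect="sparksql")  # or dialect="spark3"
violations = sqlfluff.lint(query, dialect="sparksql")
print(bool(parse_tree), len(violations))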
709835a92ae19eda5d42775714ddb2d5eeddb919
3,886
py
Python
venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/network/avi/avi_errorpagebody.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
1
2020-01-22T13:11:23.000Z
2020-01-22T13:11:23.000Z
venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/network/avi/avi_errorpagebody.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
12
2020-02-21T07:24:52.000Z
2020-04-14T09:54:32.000Z
venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/network/avi/avi_errorpagebody.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
null
null
null
#!/usr/bin/python # # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # # Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' --- module: avi_errorpagebody author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com> short_description: Module for setup of ErrorPageBody Avi RESTful Object description: - This module is used to configure ErrorPageBody object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. choices: ["add", "replace", "delete"] error_page_body: description: - Error page body sent to client when match. - Field introduced in 17.2.4. format: description: - Format of an error page body html or json. - Enum options - ERROR_PAGE_FORMAT_HTML, ERROR_PAGE_FORMAT_JSON. - Field introduced in 18.2.3. - Default value when not specified in API or module is interpreted by Avi Controller as ERROR_PAGE_FORMAT_HTML. name: description: - Field introduced in 17.2.4. required: true tenant_ref: description: - It is a reference to an object of type tenant. - Field introduced in 17.2.4. url: description: - Avi controller URL of the object. uuid: description: - Field introduced in 17.2.4. extends_documentation_fragment: - community.network.avi ''' EXAMPLES = """ - name: Example to create ErrorPageBody object community.network.avi_errorpagebody: controller: 10.10.25.42 username: admin password: something state: present name: sample_errorpagebody """ RETURN = ''' obj: description: ErrorPageBody (api/errorpagebody) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible_collections.community.network.plugins.module_utils.network.avi.avi import ( avi_common_argument_spec, avi_ansible_api, HAS_AVI) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), error_page_body=dict(type='str',), format=dict(type='str',), name=dict(type='str', required=True), tenant_ref=dict(type='str',), url=dict(type='str',), uuid=dict(type='str',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) or requests is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'errorpagebody', set([])) if __name__ == '__main__': main()
32.115702
123
0.645651
74c47c59e8759797f336932bd5d09cbbf7125c96
8,767
py
Python
yotta/install.py
microbit-foundation/yotta
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
[ "Apache-2.0" ]
176
2015-01-02T07:31:59.000Z
2022-03-21T12:40:02.000Z
yotta/install.py
microbit-foundation/yotta
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
[ "Apache-2.0" ]
549
2015-01-05T16:19:54.000Z
2021-01-15T13:46:42.000Z
yotta/install.py
microbit-foundation/yotta
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
[ "Apache-2.0" ]
84
2015-01-10T21:01:00.000Z
2022-03-24T16:04:42.000Z
# Copyright 2014-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 # See LICENSE file for details. # standard library modules, , , import argparse import logging import os # Component, , represents an installed component, internal from yotta.lib import component # access, , get components, internal from yotta.lib import access # access, , get components, internal from yotta.lib import access_common # folders, , get places to install things, internal from yotta.lib import folders # --config option, , , internal from yotta import options def addOptions(parser): options.config.addTo(parser) parser.add_argument('component', default=None, nargs='?', help='If specified, install this module instead of installing '+ 'the dependencies of the current module.' ) parser.add_argument('--test-dependencies', dest='install_test_deps', choices=('none', 'all', 'own'), default='own', help='Control the installation of dependencies necessary for building tests.' ) group = parser.add_mutually_exclusive_group() group.add_argument('--global', '-g', dest='act_globally', default=False, action='store_true', help='Install globally instead of in the current working directory.' ) # Deprecated options, these now do nothing! --save behavior is the default, # and --save-target has been removed. group.add_argument('--save', dest='save', action='store_true', default=False, help=argparse.SUPPRESS ) group.add_argument('--save-target', dest='save_target', action='store_true', default=False, help=argparse.SUPPRESS ) def execCommand(args, following_args): if not hasattr(args, 'install_test_deps'): vars(args)['install_test_deps'] = 'none' if getattr(args, 'save', None): logging.warning('the --save option is now the default and is ignored. It will be removed soon.') if getattr(args, 'save_target', None): logging.warning('the --save-target is now ignored. It will be removed soon.') cwd = os.getcwd() c = component.Component(cwd) if args.component is None: return installDeps(args, c) elif c or c.exists(): return installComponentAsDependency(args, c) else: return installComponent(args) def checkPrintStatus(errors, components, top_component, target): status = 0 for error in errors: logging.error(error) status = 1 for c in list(components.values()) + [top_component]: if c and c.getError(): logging.error('%s %s', c.getName(), c.getError()) status = 1 leaf_target = None if target and target.hierarchy: for t in target.hierarchy: if not leaf_target: leaf_target = t if t and t.getError(): if t is leaf_target: logging.error('target %s %s', t.getName(), t.getError()) else: logging.error('base target %s of %s %s', t.getName(), leaf_target.getName(), t.getError()) status = 1 return status def installDeps(args, current_component): # settings, , load and save settings, internal from yotta.lib import settings logging.debug('install deps for %s' % current_component) if not current_component: logging.debug(str(current_component.getError())) logging.error('The current directory does not contain a valid module.') return 1 # warn if the target hasn't been explicitly specified when running a build: # this is likely user-error if not settings.getProperty('build', 'targetSetExplicitly') and not \ getattr(args, '_target_set_explicitly', False): logging.warning( 'The build target has not been set, so the default (%s) is being ' + 'used. You can use `yotta target <targetname>` to set the build ' + 'target. 
See http://docs.yottabuild.org/tutorial/targets.html for ' 'more information on using targets.', args.target ) target, errors = current_component.satisfyTarget(args.target, additional_config=args.config) if errors: for error in errors: logging.error(error) return 1 if args.act_globally: # the npm behaviour here would be to install the working directory # module into the global modules dir raise NotImplementedError() else: # satisfyDependenciesRecursive will always prefer to install # dependencies in the yotta_modules directory of the top-level module, # so it's safe to set traverse_links here when we're only *installing* # modules (not updating them). This will never result in # Spooky-Action-Through-A-Symlink. components, errors = current_component.satisfyDependenciesRecursive( target = target, traverse_links = True, available_components = [(current_component.getName(), current_component)], test = {'own':'toplevel', 'all':True, 'none':False}[args.install_test_deps] ) return checkPrintStatus(errors, components, current_component, target) def installComponentAsDependency(args, current_component): logging.debug('install component %s as dependency of %s' % (args.component, current_component)) if not current_component: logging.debug(str(current_component.getError())) logging.error('The current directory does not contain a valid module.') return -1 target, errors = current_component.satisfyTarget(args.target, additional_config=args.config) if errors: for error in errors: logging.error(error) return 1 modules_dir = current_component.modulesPath() from yotta.lib import sourceparse # check if we have both a name and specification component_name, component_spec = sourceparse.parseModuleNameAndSpec(args.component) logging.info('%s, %s', component_name, component_spec) if component_name == current_component.getName(): logging.error('will not install module %s as a dependency of itself', component_name) return -1 try: installed = access.satisfyVersion( component_name, component_spec, available = {current_component.getName():current_component}, search_paths = [modules_dir], working_directory = modules_dir ) except access_common.AccessException as e: logging.error(e) return 1 # We always add the component to the dependencies of the current component # (if it is not already present), and write that back to disk. Without # writing to disk the dependency wouldn't be usable. if installed and not current_component.hasDependency(component_name): vs = sourceparse.parseSourceURL(component_spec) if vs.source_type == 'registry': saved_spec = current_component.saveDependency(installed) else: saved_spec = current_component.saveDependency(installed, component_spec) current_component.writeDescription() logging.info('dependency %s: %s written to module.json', component_name, saved_spec) else: logging.info('dependency %s is already present in module.json', component_name) # !!! 
should only install dependencies necessary for the one thing that # we're installing (but existing components should be made available to # satisfy dependencies) components, errors = current_component.satisfyDependenciesRecursive( target = target, available_components = [(current_component.getName(), current_component)], test = {'own':'toplevel', 'all':True, 'none':False}[args.install_test_deps] ) return checkPrintStatus(errors, components, current_component, target) def installComponent(args): path = folders.globalInstallDirectory() if args.act_globally else os.getcwd() logging.debug('install component %s to %s' % (args.component, path)) from yotta.lib import sourceparse # check if we have both a name and specification component_name, component_spec = sourceparse.parseModuleNameAndSpec(args.component) try: access.satisfyVersion( component_name, component_spec, available = dict(), search_paths = [path], working_directory = path ) except access_common.AccessException as e: logging.error('%s', e) return 1 os.chdir(component_name) return installDeps(args, component.Component(os.getcwd()))
40.96729
110
0.662712
2513b0739bc0a6d8ad20d701e7c34e2e755643fd
7,123
py
Python
addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py
emackey/glTF-Blender-IO
3ab37ba38a3ae483d69a029f979286ded8b9b94b
[ "Apache-2.0" ]
null
null
null
addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py
emackey/glTF-Blender-IO
3ab37ba38a3ae483d69a029f979286ded8b9b94b
[ "Apache-2.0" ]
null
null
null
addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py
emackey/glTF-Blender-IO
3ab37ba38a3ae483d69a029f979286ded8b9b94b
[ "Apache-2.0" ]
null
null
null
# Copyright 2018-2019 The glTF-Blender-IO authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mathutils from . import gltf2_blender_export_keys from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached from io_scene_gltf2.io.com import gltf2_io from io_scene_gltf2.io.exp import gltf2_io_binary_data from io_scene_gltf2.io.com import gltf2_io_constants from io_scene_gltf2.blender.exp import gltf2_blender_gather_accessors from io_scene_gltf2.blender.exp import gltf2_blender_gather_joints from io_scene_gltf2.blender.com import gltf2_blender_math from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions @cached def gather_skin(blender_object, export_settings): """ Gather armatures, bones etc into a glTF2 skin object. :param blender_object: the object which may contain a skin :param export_settings: :return: a glTF2 skin object """ if not __filter_skin(blender_object, export_settings): return None skin = gltf2_io.Skin( extensions=__gather_extensions(blender_object, export_settings), extras=__gather_extras(blender_object, export_settings), inverse_bind_matrices=__gather_inverse_bind_matrices(blender_object, export_settings), joints=__gather_joints(blender_object, export_settings), name=__gather_name(blender_object, export_settings), skeleton=__gather_skeleton(blender_object, export_settings) ) export_user_extensions('gather_skin_hook', export_settings, skin, blender_object) return skin def __filter_skin(blender_object, export_settings): if not export_settings[gltf2_blender_export_keys.SKINS]: return False if blender_object.type != 'ARMATURE' or len(blender_object.pose.bones) == 0: return False return True def __gather_extensions(blender_object, export_settings): return None def __gather_extras(blender_object, export_settings): return None def __gather_inverse_bind_matrices(blender_object, export_settings): axis_basis_change = mathutils.Matrix.Identity(4) if export_settings[gltf2_blender_export_keys.YUP]: axis_basis_change = mathutils.Matrix( ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) if export_settings['gltf_def_bones'] is False: # build the hierarchy of nodes out of the bones root_bones = [] for blender_bone in blender_object.pose.bones: if not blender_bone.parent: root_bones.append(blender_bone) else: _, children_, root_bones = get_bone_tree(None, blender_object) matrices = [] # traverse the matrices in the same order as the joints and compute the inverse bind matrix def __collect_matrices(bone): inverse_bind_matrix = gltf2_blender_math.multiply( axis_basis_change, gltf2_blender_math.multiply( blender_object.matrix_world, bone.bone.matrix_local ) ).inverted() matrices.append(inverse_bind_matrix) if export_settings['gltf_def_bones'] is False: for child in bone.children: __collect_matrices(child) else: if bone.name in children_.keys(): for child in children_[bone.name]: __collect_matrices(blender_object.pose.bones[child]) # start with the "root" bones and recurse into children, in the same ordering as the how 
joints are gathered for root_bone in root_bones: __collect_matrices(root_bone) # flatten the matrices inverse_matrices = [] for matrix in matrices: for column in range(0, 4): for row in range(0, 4): inverse_matrices.append(matrix[row][column]) binary_data = gltf2_io_binary_data.BinaryData.from_list(inverse_matrices, gltf2_io_constants.ComponentType.Float) return gltf2_blender_gather_accessors.gather_accessor( binary_data, gltf2_io_constants.ComponentType.Float, len(inverse_matrices) // gltf2_io_constants.DataType.num_elements(gltf2_io_constants.DataType.Mat4), None, None, gltf2_io_constants.DataType.Mat4, export_settings ) def __gather_joints(blender_object, export_settings): root_joints = [] if export_settings['gltf_def_bones'] is False: # build the hierarchy of nodes out of the bones for blender_bone in blender_object.pose.bones: if not blender_bone.parent: root_joints.append(gltf2_blender_gather_joints.gather_joint(blender_bone, export_settings)) else: _, children_, root_joints = get_bone_tree(None, blender_object) root_joints = [gltf2_blender_gather_joints.gather_joint(i, export_settings) for i in root_joints] # joints is a flat list containing all nodes belonging to the skin joints = [] def __collect_joints(node): joints.append(node) if export_settings['gltf_def_bones'] is False: for child in node.children: __collect_joints(child) else: if node.name in children_.keys(): for child in children_[node.name]: __collect_joints(gltf2_blender_gather_joints.gather_joint(blender_object.pose.bones[child], export_settings)) for joint in root_joints: __collect_joints(joint) return joints def __gather_name(blender_object, export_settings): return blender_object.name def __gather_skeleton(blender_object, export_settings): # In the future support the result of https://github.com/KhronosGroup/glTF/pull/1195 return None # gltf2_blender_gather_nodes.gather_node(blender_object, blender_scene, export_settings) @cached def get_bone_tree(blender_dummy, blender_object): bones = [] children = {} root_bones = [] def get_parent(bone): bones.append(bone.name) if bone.parent is not None: if bone.parent.name not in children.keys(): children[bone.parent.name] = [] children[bone.parent.name].append(bone.name) get_parent(bone.parent) else: root_bones.append(bone.name) for bone in [b for b in blender_object.data.bones if b.use_deform is True]: get_parent(bone) # remove duplicates for k, v in children.items(): children[k] = list(set(v)) list_ = list(set(bones)) root_ = list(set(root_bones)) return [blender_object.data.bones[b] for b in list_], children, [blender_object.pose.bones[b] for b in root_]
37.098958
129
0.707146
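For reference, __gather_inverse_bind_matrices above flattens each inverted bind matrix column by column, which matches glTF's column-major MAT4 accessor layout. The snippet below is a plain-Python illustration of that ordering, using a hand-written list of lists in place of a mathutils.Matrix (which supports the same matrix[row][column] indexing).

# Illustration of the column-major flattening used for the MAT4 accessor.
matrix = [
    [1.0, 0.0, 0.0, 5.0],
    [0.0, 1.0, 0.0, 6.0],
    [0.0, 0.0, 1.0, 7.0],
    [0.0, 0.0, 0.0, 1.0],
]

flat = []
for column in range(0, 4):
    for row in range(0, 4):
        flat.append(matrix[row][column])

# The translation (5, 6, 7) lands in elements 12-14, as glTF expects.
print(flat[12:15])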
6d8429ae302e6611b0c5d50979d843edf3528fad
3,498
py
Python
src/module_08/code_01.py
abelatnvidia/IntroTF
35216d1c149e2cfbf4a3374a2871d8119e585870
[ "Apache-2.0" ]
null
null
null
src/module_08/code_01.py
abelatnvidia/IntroTF
35216d1c149e2cfbf4a3374a2871d8119e585870
[ "Apache-2.0" ]
null
null
null
src/module_08/code_01.py
abelatnvidia/IntroTF
35216d1c149e2cfbf4a3374a2871d8119e585870
[ "Apache-2.0" ]
null
null
null
import os, json, tensorflow as tf def _int64_feature(value): # make sure to always encapsulate as list if not isinstance(value, list): value = [value] # we're done return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) def _bytes_feature(value): # make sure convert to bytes same python 2 as python 3 value = tf.compat.as_bytes(value) # we're done return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _float_feature(value): # make sure to always encapsulate as list if not isinstance(value, list): value = [value] # we're done return tf.train.Feature(float_list=tf.train.FloatList(value=value)) # coco image id image_id = '337975' # define a relative path to the coco annotations and image json_annotations_file_path = os.path.join('data', 'coco', image_id) + '.json' coco_image_file_path = os.path.join('data', 'coco', image_id) + '.jpeg' # slurp up the data file with open(json_annotations_file_path) as fdat: data = json.load(fdat) # blab about the data type print('imported json annotations as type: {}'.format(type(data))) # print the number of annotations listed print('number of annotations imported: {}'.format(len(data['annotations']))) # init list of bbox coords xmin = list(); ymin = list(); xmax = list(); ymax = list() # init list of object labels for each bounding box labels = list() # iterate over annotations and generate list of bbox coords for annotation in data['annotations']: # get the category for this bounding box labels.append(annotation['category_id']) # get the coordinates for this object bbox # hm ... the coco format might actually be # [xmin, ymin, xmax, ymax] ymin.append(annotation['bbox'][0]) xmin.append(annotation['bbox'][1]) ymax.append(annotation['bbox'][2]) xmax.append(annotation['bbox'][3]) # get image_buffer with tf.gfile.FastGFile(coco_image_file_path, 'rb') as f: image_data = f.read() # decode the binary data as a jpg image and return a tensor image_tensor_op = tf.image.decode_jpeg(image_data, channels=3) # execute the image tensor op to get the numpy array with tf.Session() as sess: image_numpy = sess.run(image_tensor_op) print('image_numpy shape: {}'.format(image_numpy.shape)) # pack image metadata, label, and data together as a single training example example = tf.train.Example( features=tf.train.Features( feature={ 'image/object/bbox/xmin': _float_feature(xmin), 'image/object/bbox/xmax': _float_feature(xmax), 'image/object/bbox/ymin': _float_feature(ymin), 'image/object/bbox/ymax': _float_feature(ymax), 'image/object/label' : _int64_feature(labels), 'image/encoded' : _bytes_feature(image_numpy.tobytes()) } ) ) # any name will work tf_record_file_name = image_id+'.tfrecord' # init a TFRecord writer tf_record_writer = tf.python_io.TFRecordWriter(tf_record_file_name) # serialize the example to a string (bytes) and write tf_record_writer.write(example.SerializeToString()) # don't forget to clean up (!!) tf_record_writer.close() ''' Most of this is exactly the same as our previous example working with notMNIST. The only difference here is that we have list of bbox coordinates that are different for each example. However, you see that actually, always encode the data as list of values (!) even when we only have a single peice of data to encode. '''
33
79
0.707833
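The script above stores the bounding-box coordinates and labels as variable-length lists and the decoded image as raw bytes, so reading the record back needs matching feature specs. The sketch below stays with the same TF 1.x API family as the writer; the feature keys mirror the ones used when writing, and the image height/width are assumed to be known separately, since they are not written into the record.

import tensorflow as tf

tf_record_file_name = '337975.tfrecord'

# Variable-length lists become VarLenFeature; the raw image buffer is a
# single string feature.
feature_spec = {
    'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
    'image/object/label':     tf.VarLenFeature(tf.int64),
    'image/encoded':          tf.FixedLenFeature([], tf.string),
}

# There is only one example in the file, so just take the first record.
record = next(tf.python_io.tf_record_iterator(tf_record_file_name))
features = tf.parse_single_example(record, feature_spec)

with tf.Session() as sess:
    parsed = sess.run(features)

print(parsed['image/object/label'].values)      # object category ids
print(parsed['image/object/bbox/xmin'].values)  # variable-length bbox coords
# parsed['image/encoded'] holds the raw RGB bytes from tobytes(); recovering
# the array needs np.frombuffer(...).reshape(height, width, 3) with the
# original image shape.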
63446c7a99da759defffb8b6c5b8fda7062a7938
1,589
py
Python
stix_shifter/stix_transmission/src/modules/synchronous_dummy/synchronous_dummy_connector.py
jlstrick83/stix-shifter
7903bb16f58f4ec9b7d22d575aef2a1dee63e815
[ "Apache-2.0" ]
null
null
null
stix_shifter/stix_transmission/src/modules/synchronous_dummy/synchronous_dummy_connector.py
jlstrick83/stix-shifter
7903bb16f58f4ec9b7d22d575aef2a1dee63e815
[ "Apache-2.0" ]
null
null
null
stix_shifter/stix_transmission/src/modules/synchronous_dummy/synchronous_dummy_connector.py
jlstrick83/stix-shifter
7903bb16f58f4ec9b7d22d575aef2a1dee63e815
[ "Apache-2.0" ]
1
2020-10-19T18:19:02.000Z
2020-10-19T18:19:02.000Z
from ..base.base_connector import BaseConnector # from .synchronous_dummy_results_connector import SynchronousDummyResultsConnector # from .synchronous_dummy_ping import SynchronousDummyPing import time class Connector(BaseConnector): def __init__(self): self.is_async = False self.results_connector = self self.ping_connector = self def ping(self): return "synchronous ping" def create_results_connection(self, params, options): """ Creates a connection to the specified datasource to send a query :param params: the parameters for the query :param options: CLI options passed in :return: in dummy connectors, just returns passed in parameters """ config = params['config'] # The post-processed query, already translated from STIX SCO query = params['query'] # set headers headers = { "Content-Type": "application/json", "Accept": "application/json" } # construct request object, purely for visual purposes in dummy implementation request = { "host": config['host'], "path": config['path'] + query, "port": config['port'], "headers": headers, "method": "GET" } print(request) time.sleep(3) dummy_data = {"obj_1": {}, "obj_2": {}, "obj_3": {}, "obj_4": {}, "obj_5": {}} return_obj = { "response_code": 200, "query_results": dummy_data } return return_obj
27.877193
86
0.595343
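A minimal driver clarifies the shape of the params argument the dummy connector expects. The host, port, path and query values below are placeholders, and Connector is assumed to be importable from the module above; query would normally be the translated STIX pattern produced earlier in the stix-shifter pipeline.

# Minimal driver for the synchronous dummy connector above.
params = {
    "config": {
        "host": "datasource.example.com",   # placeholder
        "port": 443,                        # placeholder
        "path": "/api/search?q=",           # placeholder
    },
    "query": "some_translated_query",       # placeholder
}

connector = Connector()
print(connector.ping())  # "synchronous ping"

# Prints the constructed request, sleeps ~3s, then returns canned results.
result = connector.create_results_connection(params, options={})
print(result["response_code"], len(result["query_results"]))  # 200 5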
88044b0700c58a2c9be69ebddbfea3021071eecd
913
py
Python
OKT_22_2020/uts_problem_b.py
refeed/PAlgoritmaTRPLA
e0c79c1d57bee0869e2344651718e8cf053c035f
[ "MIT" ]
null
null
null
OKT_22_2020/uts_problem_b.py
refeed/PAlgoritmaTRPLA
e0c79c1d57bee0869e2344651718e8cf053c035f
[ "MIT" ]
null
null
null
OKT_22_2020/uts_problem_b.py
refeed/PAlgoritmaTRPLA
e0c79c1d57bee0869e2344651718e8cf053c035f
[ "MIT" ]
null
null
null
""" PETUNJUK MASUKAN Dua baris simbol yang terdiri dari dua karakter: [] = Kertas, () = Batu, 8< = Gunting. Baris Pertama adalah simbol yang dipilih Pak Blangkon dan baris kedua adalah simbol yang dipilih Pak Semar. PETUNJUK KELUARAN Pemenang suit. "Blangkon" atau "Semar" atau "Seri". """ blangkon = input().strip() semar = input().strip() BATU_STR = '()' GUNTING_STR = '8<' KERTAS_STR = '[]' BLANKGON_STR = 'Blangkon' SEMAR_STR = 'Semar' if semar == blangkon: print('Seri') elif semar == BATU_STR: if blangkon == GUNTING_STR: print(SEMAR_STR) elif blangkon == KERTAS_STR: print(BLANKGON_STR) elif semar == GUNTING_STR: if blangkon == KERTAS_STR: print(SEMAR_STR) elif blangkon == BATU_STR: print(BLANKGON_STR) elif semar == KERTAS_STR: if blangkon == BATU_STR: print(SEMAR_STR) elif blangkon == GUNTING_STR: print(BLANKGON_STR)
23.410256
78
0.668127
07f269b58d971c7fec41e572745b1f5ac7a49d28
2,152
py
Python
lib/node_modules/@stdlib/math/base/special/inv/benchmark/python/benchmark.py
ghalimi/stdlib
88f50b88aa945875ef053e2f89d26f9150a18c12
[ "BSL-1.0" ]
3,428
2016-07-14T13:48:46.000Z
2022-03-31T22:32:13.000Z
benchmark/python/benchmark.py
stdlib-js/math-base-special-inv
79958d1a4451cbae162e7ab4a5114cb048d971e5
[ "BSL-1.0" ]
435
2016-04-07T18:12:45.000Z
2022-03-22T15:43:17.000Z
lib/node_modules/@stdlib/math/base/special/inv/benchmark/python/benchmark.py
sthagen/stdlib
042b6215818db0e2a784e72c7e054167dcefcd2a
[ "BSL-1.0" ]
188
2016-11-29T22:58:11.000Z
2022-03-17T06:46:43.000Z
#!/usr/bin/env python # # @license Apache-2.0 # # Copyright (c) 2018 The Stdlib Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Benchmark inv.""" from __future__ import print_function import timeit NAME = "inv" REPEATS = 3 ITERATIONS = 1000000 def print_version(): """Print the TAP version.""" print("TAP version 13") def print_summary(total, passing): """Print the benchmark summary. # Arguments * `total`: total number of tests * `passing`: number of passing tests """ print("#") print("1.." + str(total)) # TAP plan print("# total " + str(total)) print("# pass " + str(passing)) print("#") print("# ok") def print_results(elapsed): """Print benchmark results. # Arguments * `elapsed`: elapsed time (in seconds) # Examples ``` python python> print_results(0.131009101868) ``` """ rate = ITERATIONS / elapsed print(" ---") print(" iterations: " + str(ITERATIONS)) print(" elapsed: " + str(elapsed)) print(" rate: " + str(rate)) print(" ...") def benchmark(): """Run the benchmark and print benchmark results.""" setup = "from random import random;" stmt = "y = 1000.0*random() - 500.0; z = 1.0 / y;" t = timeit.Timer(stmt, setup=setup) print_version() for i in range(REPEATS): print("# python::" + NAME) elapsed = t.timeit(number=ITERATIONS) print_results(elapsed) print("ok " + str(i+1) + " benchmark finished") print_summary(REPEATS, REPEATS) def main(): """Run the benchmark.""" benchmark() if __name__ == "__main__": main()
21.959184
74
0.631506
0996f24f0cf7672aae1afa5c25e6812228e9d292
2,223
py
Python
env/lib/python3.6/site-packages/telegram/payment/orderinfo.py
rogerscristo/BotFWD
4f2ab1f4f4543c157ca0a79084536c065f74159f
[ "MIT" ]
null
null
null
env/lib/python3.6/site-packages/telegram/payment/orderinfo.py
rogerscristo/BotFWD
4f2ab1f4f4543c157ca0a79084536c065f74159f
[ "MIT" ]
3
2017-09-01T22:18:30.000Z
2017-09-01T22:24:57.000Z
env/lib/python3.6/site-packages/telegram/payment/orderinfo.py
rogerscristo/BotFWD
4f2ab1f4f4543c157ca0a79084536c065f74159f
[ "MIT" ]
3
2018-02-22T22:20:27.000Z
2018-04-22T10:58:24.000Z
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2017 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains an object that represents a Telegram OrderInfo.""" from telegram import TelegramObject, ShippingAddress class OrderInfo(TelegramObject): """This object represents information about an order. Attributes: name (:obj:`str`): Optional. User name. phone_number (:obj:`str`): Optional. User's phone number. email (:obj:`str`): Optional. User email. shipping_address (:class:`telegram.ShippingAddress`): Optional. User shipping address. Args: name (:obj:`str`, optional): User name. phone_number (:obj:`str`, optional): User's phone number. email (:obj:`str`, optional): User email. shipping_address (:class:`telegram.ShippingAddress`, optional): User shipping address. **kwargs (:obj:`dict`): Arbitrary keyword arguments. """ def __init__(self, name=None, phone_number=None, email=None, shipping_address=None, **kwargs): self.name = name self.phone_number = phone_number self.email = email self.shipping_address = shipping_address @classmethod def de_json(cls, data, bot): if not data: return cls() data = super(OrderInfo, cls).de_json(data, bot) data['shipping_address'] = ShippingAddress.de_json(data.get('shipping_address'), bot) return cls(**data)
38.327586
99
0.677463
954424045dbabfe3e20a5e20bf553b1e5aab787b
1,079
py
Python
binding-python/runtime/src/test/python/tests/binding/transport/TestPlainMailbox.py
apache/etch
5a875755019a7f342a07c8c368a50e3efb6ae68c
[ "ECL-2.0", "Apache-2.0" ]
9
2015-02-14T15:09:54.000Z
2021-11-10T15:09:45.000Z
binding-python/runtime/src/test/python/tests/binding/support/TestValidator_string.py
apache/etch
5a875755019a7f342a07c8c368a50e3efb6ae68c
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
binding-python/runtime/src/test/python/tests/binding/support/TestValidator_string.py
apache/etch
5a875755019a7f342a07c8c368a50e3efb6ae68c
[ "ECL-2.0", "Apache-2.0" ]
14
2015-04-20T10:35:00.000Z
2021-11-10T15:09:35.000Z
# Licensed to the Apache Software Foundation (ASF) under one * # or more contributor license agreements. See the NOTICE file * # distributed with this work for additional information * # regarding copyright ownership. The ASF licenses this file * # to you under the Apache License, Version 2.0 (the * # "License"); you may not use this file except in compliance * # with the License. You may obtain a copy of the License at * # * # http://www.apache.org/licenses/LICENSE-2.0 * # * # Unless required by applicable law or agreed to in writing, * # software distributed under the License is distributed on an * # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * # KIND, either express or implied. See the License for the * # specific language governing permissions and limitations * # under the License. import unittest if __name__ == '__main__': unittest.main()
46.913043
65
0.603336
1e5de86e6786d2bd67077f7ef2f300a87d5d77e2
4,760
py
Python
apps/log_extract/migrations/0017_auto_20201111_1050.py
qqqqqie/bk-log
1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1
[ "MIT" ]
75
2021-07-14T09:32:36.000Z
2022-03-31T15:26:53.000Z
apps/log_extract/migrations/0017_auto_20201111_1050.py
qqqqqie/bk-log
1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1
[ "MIT" ]
561
2021-07-14T07:45:47.000Z
2022-03-31T11:41:28.000Z
apps/log_extract/migrations/0017_auto_20201111_1050.py
qqqqqie/bk-log
1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1
[ "MIT" ]
41
2021-07-14T07:39:50.000Z
2022-03-25T09:22:18.000Z
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-LOG 蓝鲸日志平台 is licensed under the MIT License. License for BK-LOG 蓝鲸日志平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # Generated by Django 1.11.23 on 2020-11-11 02:50 from __future__ import unicode_literals import apps.models from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("log_extract", "0016_tasks_ex_data"), ] operations = [ migrations.CreateModel( name="ExtractLink", fields=[ ("link_id", models.AutoField(primary_key=True, serialize=False, verbose_name="链路id")), ( "link_type", models.CharField( choices=[("common", "内网链路"), ("qcloud_cos", "腾讯云cos链路")], default="common", max_length=20, verbose_name="链路类型", ), ), ("operator", models.CharField(max_length=255, verbose_name="执行人")), ("op_bk_biz_id", models.IntegerField(verbose_name="执行bk_biz_id")), ( "qcloud_secret_id", apps.models.EncryptionField( blank=True, default="", help_text="内网链路不需要填写", null=True, verbose_name="腾讯云SecretId" ), ), ( "qcloud_secret_key", apps.models.EncryptionField( blank=True, default="", help_text="内网链路不需要填写", null=True, verbose_name="腾讯云SecretKey" ), ), ( "qcloud_cos_bucket", models.CharField( blank=True, default="", help_text="内网链路不需要填写", max_length=255, verbose_name="腾讯云Cos桶名称" ), ), ( "qcloud_cos_region", models.CharField( blank=True, default="", help_text="内网链路不需要填写", max_length=255, verbose_name="腾讯云Cos区域" ), ), ("is_enable", models.BooleanField(default=True, verbose_name="是否启用")), ], options={ "verbose_name": "提取链路", "verbose_name_plural": "提取链路", }, ), migrations.CreateModel( name="ExtractLinkHost", fields=[ ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")), ("target_dir", models.CharField(default="", max_length=255, verbose_name="挂载目录")), ("bk_cloud_id", models.IntegerField(verbose_name="主机云区域id")), ("ip", models.GenericIPAddressField(verbose_name="主机ip")), ("link", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="log_extract.ExtractLink")), ], options={ "verbose_name": "链路中转机", "verbose_name_plural": "链路中转机", }, ), migrations.AddField( model_name="tasks", name="cos_file_name", field=models.CharField(blank=True, max_length=255, null=True, verbose_name="cos对象文件名称"), ), migrations.AddField( model_name="tasks", name="link_id", 
field=models.IntegerField(blank=True, null=True, verbose_name="链路id"), ), ]
44.90566
119
0.570798
db4288848aa51e7afbfd3711c6093a890fe110ab
1,930
py
Python
tworaven_apps/configurations/migrations/0005_auto_20171120_1555.py
Mital188/TwoRavens
f84751b33fde26cd379d8120b3c6a6b5ed2c315d
[ "Apache-2.0" ]
20
2017-12-11T07:26:06.000Z
2021-11-22T16:16:20.000Z
tworaven_apps/configurations/migrations/0005_auto_20171120_1555.py
Mital188/TwoRavens
f84751b33fde26cd379d8120b3c6a6b5ed2c315d
[ "Apache-2.0" ]
849
2017-10-20T18:21:18.000Z
2022-02-18T02:45:44.000Z
tworaven_apps/configurations/migrations/0005_auto_20171120_1555.py
Mital188/TwoRavens
f84751b33fde26cd379d8120b3c6a6b5ed2c315d
[ "Apache-2.0" ]
1
2020-05-18T06:02:13.000Z
2020-05-18T06:02:13.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-11-20 20:55 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('configurations', '0004_remove_appconfiguration_d3m_mode'), ] operations = [ migrations.AlterField( model_name='appconfiguration', name='app_domain', field=models.CharField(choices=[('D3M_DOMAIN', 'D3M_DOMAIN'), ('DATAVERSE_DOMAIN', 'DATAVERSE_DOMAIN'), ('EVENTDATA_DOMAIN', 'EVENTDATA_DOMAIN')], max_length=70, verbose_name='.js variable "APP_DOMAIN"'), ), migrations.AlterField( model_name='appconfiguration', name='d3m_svc_url', field=models.CharField(default='/d3m-service/', help_text='URL used to make calls that are converted to gRPC messages and sent to D3M applications', max_length=255, verbose_name='.js variable "D3M_SVC_URL"'), ), migrations.AlterField( model_name='appconfiguration', name='dataverse_url', field=models.URLField(help_text='URL to Dataverseexamples: https://beta.dataverse.org,https://dataverse.harvard.edu', verbose_name='.js variable "DATAVERSE_URL"'), ), migrations.AlterField( model_name='appconfiguration', name='production', field=models.BooleanField(help_text=' True -> data, metadata from live server resources instead of local versions', verbose_name='.js variable "PRODUCTION".'), ), migrations.AlterField( model_name='appconfiguration', name='rook_svc_url', field=models.CharField(default='/rook-custom/', help_text='URL to the rook server. examples: https://beta.dataverse.org/custom/, http://127.0.0.1:8080/rook-custom/', max_length=255, verbose_name='.js variable "ROOK_SVC_URL"'), ), ]
47.073171
238
0.658549
7e4267f5e16c9cc549f04165ea2515657520f42a
286
py
Python
api/source/testing/cases/translate_case.py
1pkg/ReRe
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
[ "MIT" ]
1
2019-12-17T10:31:48.000Z
2019-12-17T10:31:48.000Z
api/source/testing/cases/translate_case.py
c-pkg/ReRe
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
[ "MIT" ]
null
null
null
api/source/testing/cases/translate_case.py
c-pkg/ReRe
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
[ "MIT" ]
1
2019-04-29T08:19:36.000Z
2019-04-29T08:19:36.000Z
from .base_case import BaseCase from actions import Translate class TranslateCase(BaseCase): def test_bad_subjects(self): return NotImplemented def test_translate_result(self): return NotImplemented def test_extinct(self): return NotImplemented
19.066667
36
0.734266
579c275e5fa4eea50b321a7be426037c75a1a6f9
15,507
py
Python
mk_resps_from_sim_depths.py
parsotat/NITRATES
f5ce071410795c044ed7c139d6d07fd21e2dba9e
[ "MIT" ]
null
null
null
mk_resps_from_sim_depths.py
parsotat/NITRATES
f5ce071410795c044ed7c139d6d07fd21e2dba9e
[ "MIT" ]
null
null
null
mk_resps_from_sim_depths.py
parsotat/NITRATES
f5ce071410795c044ed7c139d6d07fd21e2dba9e
[ "MIT" ]
null
null
null
import numpy as np import os from astropy.table import Table from astropy.io import fits from numba import jit, njit, prange from scipy import interpolate from math import erf import argparse def cli(): parser = argparse.ArgumentParser() parser.add_argument('--fname', type=str,\ help="depth dist file name", default=None) args = parser.parse_args() return args cal_resp_fname = '/storage/work/jjd330/caldb_files/data/swift/bat/cpf/swbresponse20030101v007.rsp' resp_ebins_tab = Table.read(cal_resp_fname, hdu='EBOUNDS') print(resp_ebins_tab.colnames) params_fname = '/storage/work/jjd330/caldb_files/data/swift/bat/bcf/swbparams20030101v009.fits' mt_tab = Table.read(params_fname, hdu=2) params_tab = Table.read(params_fname) print(mt_tab.colnames) print(params_tab.colnames) params_header = fits.open(params_fname)[1].header psv = [] for i in range(14): psv.append(float(params_header['PSV_'+str(i)])) print(psv) depth_fname = '/storage/work/jjd330/caldb_files/data/swift/bat/bcf/swbdepthdis20030101v003.fits' dtab = Table.read(depth_fname) print(dtab.colnames) pha_emins = resp_ebins_tab['E_MIN'] pha_emaxs = resp_ebins_tab['E_MAX'] pha_emins = np.round(pha_emins.astype(np.float)[:-1], decimals=1) pha_emaxs = np.round(pha_emaxs.astype(np.float)[:-1], decimals=1) pha_extras = np.round(np.logspace(np.log10(194.9), np.log10(500.0), 24+1), decimals=1) pha_extras = np.append(pha_extras, [1e5]) pha_emins = np.append(pha_emins, pha_extras[:-1]) pha_emaxs = np.append(pha_emaxs, pha_extras[1:]) Npha_bins = len(pha_emins) print(Npha_bins) Ephotons = np.linspace(10.0, 100.0, 90+1)[:-1] + 0.5 Ephotons = np.append(Ephotons, np.linspace(100.5, 200.5, 50+1)[:-1]) Ephotons = np.append(Ephotons, np.logspace(np.log10(200.5), 2.75, 40+1)) Ephotons = np.append(Ephotons, [600.0, 700.0, 900.0, 1.5e3, 3e3, 6e3]) SUB_BINS = 10 CD_EDGE = 26.72 TE_EDGE = 31.82 EK1_CD = 23.172 EK1_TE = 27.471 N_DEPTHS = 1000 DET_THICKNESS = 0.2 dX = DET_THICKNESS/N_DEPTHS EXP_REF = 31.0 EXP_CUTOFF = 0.01 EXP_LAMB = 0.276815564 # EXP_CFF = 0.0 EXP_CFF = 0.2197 EXP_IND = 1.956559051 NORM_ADJ = 1.069053483 psv = [798.38422, -2.5190028, 25.51, 313.545, -2.36265, 0.0851972, -0.1008352, 0.882394, 80.72, 30609.4, -22.9841, 0.577041, -0.477249, 0.334311] GAIN_ = [400.0, -4.66162E-7, 2.19376E-4, 0.998148, -1.53554E-4, 1.07273] SIGMA_ = [76.76, 0.0, 0.0, 1.34514, 0.0, 0.00917815, 0.640623] def adjust_gain(E): if E < GAIN_[0]: gain_adjust = GAIN_[3] + GAIN_[2]*E + GAIN_[1]*E*E else: gain_adjust = GAIN_[5] + GAIN_[4]*E return gain_adjust def get_sigma(E): if E < SIGMA_[0]: sigma = SIGMA_[3] else: sigma = SIGMA_[6] + SIGMA_[5]*E return sigma @njit(cache=True, fastmath=True) def hecht(lambda_e, lambda_h, depth): ''' Inputs: lambda_e: mean distance electrons travel in the detector (cm) lambda_h: mean distance holes travel in the detector (cm) depth: distance below the top surface of the detector (cm) Output: charge induction efficiency at that depth (dimensionless) ''' return (lambda_e*(1.0-np.exp(-(DET_THICKNESS-depth)/lambda_e)) +\ lambda_h*(1.0-np.exp(-depth/lambda_h)))/DET_THICKNESS @njit(cache=True, fastmath=True, parallel=True) def mutau_model(mutaue, mutauh, voltage, gain_adjust, zbins0, zbins1,\ n_bins, E, norm, emax, dist): # print(mutaue) # print(voltage) # print(DET_THICKNESS) # print(mutaue*voltage/DET_THICKNESS) lambda_e = mutaue*voltage/DET_THICKNESS lambda_h = mutauh*voltage/DET_THICKNESS # dx = DET_THICKNESS/n_depths dzs = zbins1 - zbins0 zax = (zbins0 + zbins1)/2. 
# print "dx: ", dx max_hecht_depth = lambda_h*DET_THICKNESS/(lambda_e+lambda_h) n_depths = len(zbins0) # print "max_hecht_depth: ", max_hecht_depth result = np.zeros(n_bins) for i in prange(n_depths): # depth = (i+0.5)*dx depth = DET_THICKNESS - zax[i] slice_eff_area = dist[i]*dzs[i] eff_energy = E*gain_adjust*hecht(lambda_e,lambda_h,depth)/\ hecht(lambda_e,lambda_h,max_hecht_depth) if (eff_energy <= emax[n_bins-1]): # find the bin (j) that corresponds to eff_energy j=0 while (emax[j]<eff_energy): j+=1 # add norm*slice_eff_area to the contents of that ph bin result[j]+=norm*slice_eff_area return result def pha_bins2pre_pha_bins(emins, emaxs, sub_bins=10): nphabins = len(emins) nphabins_pre = nphabins*sub_bins emins_pre = np.zeros(nphabins_pre) emaxs_pre = np.zeros(nphabins_pre) for i in range(nphabins): emin = emins[i] emax = emaxs[i] de = (emax - emin)/sub_bins for j in range(sub_bins): ind = i*sub_bins + j emins_pre[ind] = emin + j*de emaxs_pre[ind] = emins_pre[ind] + de return emins_pre, emaxs_pre sqrt_half = 1./np.sqrt(2.0) @njit(cache=True) def norm_cdf(x, sig): x_ = sqrt_half*x/sig cdf = 0.5*(1. + erf(x_)) return cdf @njit(cache=True) def gauss_conv(res_pre_gauss, emins, emaxs, emins_pre, emaxs_pre, sigma): Nphas_bins = len(emins) # emins_pre, emaxs_pre = pha_bins2pre_pha_bins(emins, emaxs) ecents_pre = (emins_pre+emaxs_pre)/2. Npha_bins_pre = len(emins_pre) result = np.zeros(Nphas_bins) # gauss = stats.norm(loc=0.0, scale=sigma) for i in range(Npha_bins_pre): ecent = ecents_pre[i] # gauss = stats.norm(loc=ecent, scale=sigma) pre_res = res_pre_gauss[i] for j in range(Nphas_bins): gauss_prob = norm_cdf(emaxs[j]-ecent, sigma) - norm_cdf(emins[j]-ecent, sigma) # gauss_probs = gauss.cdf(emaxs) - gauss.cdf(emins) result[j] += gauss_prob*pre_res return result def multi_mutau_func(Es, nphabins, mt_tab, voltage, dist, zbins0, zbins1,\ pha_emins, pha_emaxs): sigma = get_sigma(Es[-1]) nphabins_pre = nphabins*SUB_BINS Ne = len(Es) result_pre_gauss = np.zeros(nphabins_pre) emin_pre, emax_pre = pha_bins2pre_pha_bins(pha_emins, pha_emaxs,\ sub_bins=SUB_BINS) # dx = DET_THICKNESS/n_depths # dist = dists[0] dzs = zbins1 - zbins0 dist_eff_area = 0.0 dist_tot = np.sum(dist*dzs) for row in mt_tab: frac = row['fraction'] norm_this_mt = frac*NORM_ADJ # print row # print norm_this_mt for j,E in enumerate(Es): gain_adjust = adjust_gain(E) res_pre_gauss = mutau_model(row['mutau_e'], row['mutau_h'],\ voltage, gain_adjust, zbins0, zbins1,\ nphabins_pre, E, norm_this_mt,\ emax_pre, dist[j]) # print np.sum(res_pre_gauss) result_pre_gauss += res_pre_gauss res_pre_gauss = result_pre_gauss result = gauss_conv(res_pre_gauss, pha_emins.astype(np.float), pha_emaxs.astype(np.float),\ emin_pre.astype(np.float), emax_pre.astype(np.float), sigma) return result ''' Funcs outside the DRMgen stuff ''' def get_comp_depths(comp_depth_tab, PrimaryE, comp_dEax, colname): # comp_Eax = (comp_Ebins[:-1]+comp_Ebins[1:])/2. # comp_dEax = PrimaryE - comp_Eax comp_Ebin_width = comp_dEax[0] - comp_dEax[1] Nebins = len(comp_dEax) Nz = len(comp_depth_tab[colname][0]) # print Nebins # print Nz # tab_Eax = (comp_depth_tab[colname]) comp_emids = (comp_depth_tab['Ehi'] + comp_depth_tab['Elow'])/2. 
comp_dEmids = PrimaryE - comp_emids depths = np.zeros((Nebins, Nz)) for i in range(Nebins): ind0 = np.digitize(comp_dEax[i], comp_dEmids) - 1 if ind0 < 0: depths[i] += comp_depth_tab[colname][0]*comp_Ebin_width elif ind0 >= (len(comp_dEmids)-1): depths[i] += comp_depth_tab[colname][-1]*comp_Ebin_width else: ind1 = ind0 + 1 dE = comp_dEmids[ind1] - comp_dEmids[ind0] wt0 = (comp_dEmids[ind1] - comp_dEax[i])/dE wt1 = (comp_dEax[i] - comp_dEmids[ind0])/dE depths[i] += wt0*comp_depth_tab[colname][ind0]*comp_Ebin_width depths[i] += wt1*comp_depth_tab[colname][ind1]*comp_Ebin_width return depths def get_col_row_strs(cnames): col_row_strs = [] for cname in cnames: if cname in ['Ehi', 'Elow', 'Energy']: continue cname_list = cname.split('_') try: if 'comp' in cname_list: col0 = int(cname_list[-8]) col1 = int(cname_list[-7]) row0 = int(cname_list[-5]) row1 = int(cname_list[-4]) else: col0 = int(cname_list[-5]) col1 = int(cname_list[-4]) row0 = int(cname_list[-2]) row1 = int(cname_list[-1]) except Exception as E: col0, col1, row0, row1 = 0, 16, 0, 16 cr_str = 'cols_%d_%d_rows_%d_%d' %(col0,col1,row0,row1) col_row_strs.append(cr_str) col_row_strs = set(col_row_strs) # print len(col_row_strs) return col_row_strs def get_resp_dicts(depth_file, Ephotons, pha_emins, pha_emaxs): depth_tab = depth_file[1].data PrimaryEs = depth_tab['Energy'] ztab = depth_file[-1].data z_lows = ztab['Zlow'] z_highs = ztab['Zhi'] Nzbins = len(z_lows) dzs = z_highs - z_lows Npha_bins = len(pha_emins) line_col_row_strs = get_col_row_strs(depth_tab.columns.names) comp_col_row_strs = get_col_row_strs(depth_file[2].data.columns.names) Elows = [10.0] Ehis = [] for i in range(len(Ephotons)): Ehis.append(Ephotons[i] + (Ephotons[i] - Elows[i])) if i < len(Ephotons)-1: Elows.append(Ehis[i]) res_dicts = [] orientation_names = ['NonEdges', 'right', 'left', 'top', 'bot'] for ii,Ephoton in enumerate(Ephotons): res_dict = {} res_dict['ENERG_LO'] = Elows[ii] res_dict['ENERG_HI'] = Ehis[ii] Primary_ind0 = np.digitize(Ephoton, PrimaryEs) - 1 Primary_ind1 = Primary_ind0 + 1 print(PrimaryEs[Primary_ind0], PrimaryEs[Primary_ind1]) dE = PrimaryEs[Primary_ind1] - PrimaryEs[Primary_ind0] wt0 = (PrimaryEs[Primary_ind1] - Ephoton)/dE wt1 = (Ephoton - PrimaryEs[Primary_ind0])/dE print(wt0, wt1) comp_Ebins = np.linspace(10.0, Ephoton, int(Ephoton-10.0)+1) comp_Eax = (comp_Ebins[:-1]+comp_Ebins[1:])/2. 
comp_dEax = Ephoton - comp_Eax for col_row in line_col_row_strs: cname_list = col_row.split('_') col0 = int(cname_list[-5]) col1 = int(cname_list[-4]) row0 = int(cname_list[-2]) row1 = int(cname_list[-1]) for oname in orientation_names: depth_list = [] Es = [] cname = oname + '_' + col_row peak_depth = np.zeros(Nzbins) cd_depth = np.zeros(Nzbins) te_depth = np.zeros(Nzbins) for ei, wt in zip([Primary_ind0, Primary_ind1],[wt0,wt1]): try: peak_depth += wt*depth_tab['PEAK_' + oname + '_' + col_row][ei] cd_depth += wt*depth_tab['CD_' + oname + '_' + col_row][ei] te_depth += wt*depth_tab['TE_' + oname + '_' + col_row][ei] except: peak_depth += wt*depth_tab['PEAK_' + oname][ei] cd_depth += wt*depth_tab['CD_' + oname][ei] te_depth += wt*depth_tab['TE_' + oname][ei] Es = [Ephoton, Ephoton-EK1_CD, Ephoton-EK1_TE] depth_list = [peak_depth, cd_depth, te_depth] lines_res = multi_mutau_func(Es, Npha_bins, mt_tab, 200.0, depth_list,\ z_lows.astype(np.float), z_highs.astype(np.float),\ pha_emins, pha_emaxs) res_dict[cname] = lines_res for col_row in comp_col_row_strs: cname_list = col_row.split('_') col0 = int(cname_list[-5]) col1 = int(cname_list[-4]) row0 = int(cname_list[-2]) row1 = int(cname_list[-1]) for oname in orientation_names: cname = oname + '_' + col_row depths = np.zeros((len(comp_dEax),Nzbins)) colname = cname+'_comp_Depth_dE' if len(comp_Eax) < 2: res_dict[cname+'_comp'] = np.zeros(Npha_bins) continue for ei, wt in zip([Primary_ind0, Primary_ind1],[wt0,wt1]): PrimaryE = PrimaryEs[ei] print(PrimaryE) print(depth_file[ei+2].name) comp_tab = depth_file[ei+2].data if not colname in comp_tab.columns.names: colname = oname + '_comp_Depth_dE' comp_emids = (comp_tab['Ehi'] + comp_tab['Elow'])/2. comp_dEmids = PrimaryE - comp_emids comp_ebin_widths = (comp_tab['Ehi'] - comp_tab['Elow']) # print len(comp_tab) # print comp_tab.columns.names try: depths_ = get_comp_depths(comp_tab, PrimaryE, comp_dEax, colname) depths += wt*depths_ except Exception as E: print(E) print("trouble with depth from") print("ind: ", ei) print(colname) comp_res = multi_mutau_func(comp_Eax, Npha_bins, mt_tab, 200.0, depths,\ z_lows.astype(np.float), z_highs.astype(np.float),\ pha_emins, pha_emaxs) res_dict[cname+'_comp'] = comp_res res_dicts.append(res_dict) print('********************************') print("done with Energy %.3f" %(Ephoton)) print('********************************') print() return res_dicts def main(args): depth_fname = args.fname depth_file = fits.open(depth_fname) theta = float(depth_fname.split('_')[-4]) phi = float(depth_fname.split('_')[-2]) res_dicts = get_resp_dicts(depth_file, Ephotons, pha_emins, pha_emaxs) drm_tab = Table(data=res_dicts) ebounds_tab = Table(data=[np.arange(len(pha_emaxs),dtype=np.int),pha_emins,pha_emaxs], names=['CHANNEL', 'E_MIN', 'E_MAX'], dtype=[np.int,np.float,np.float]) primary_hdu = fits.PrimaryHDU() drm_hdu = fits.table_to_hdu(drm_tab) ebounds_hdu = fits.table_to_hdu(ebounds_tab) ebounds_hdu.name = 'EBOUNDS' hdul = fits.HDUList([primary_hdu, drm_hdu, ebounds_hdu]) save_dname = '/storage/work/jjd330/local/bat_data/resp_tabs/' fname = 'drm_theta_%.1f_phi_%.1f_.fits'%(theta,phi) save_fname = os.path.join(save_dname, fname) hdul.writeto(save_fname, overwrite=True) if __name__ == "__main__": args = cli() main(args)
30.34638
98
0.584962
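The mutau_model routine in the file above weights each depth slice by the Hecht charge-induction efficiency and normalizes by its peak value. As a minimal standalone sketch (not part of the record), the same formula can be checked on its own; the drift lengths below are illustrative assumptions, not values from the calibration tables the script reads.

import numpy as np

DET_THICKNESS = 0.2  # detector thickness in cm, matching the constant in the file above

def hecht(lambda_e, lambda_h, depth):
    # Charge induction efficiency at `depth` cm below the top surface,
    # same expression as hecht() in mk_resps_from_sim_depths.py above.
    return (lambda_e * (1.0 - np.exp(-(DET_THICKNESS - depth) / lambda_e))
            + lambda_h * (1.0 - np.exp(-depth / lambda_h))) / DET_THICKNESS

lam_e, lam_h = 1.0, 0.05  # illustrative electron/hole drift lengths (cm)
# Efficiency is maximal where the two carrier terms balance, which is the
# max_hecht_depth = lambda_h*DET_THICKNESS/(lambda_e+lambda_h) used in mutau_model.
peak_depth = lam_h * DET_THICKNESS / (lam_e + lam_h)
print(hecht(lam_e, lam_h, 0.0), hecht(lam_e, lam_h, peak_depth), hecht(lam_e, lam_h, DET_THICKNESS))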
acbfe46c70512008a1e3874b08efb55f1d771b1c
528
py
Python
PythonScript/obtain_access_token.py
oleitao/ScriptsMegaCollection
2ec9fc0c1bdf6d81ce9bd16088a7abbd346f5b1e
[ "CC0-1.0" ]
null
null
null
PythonScript/obtain_access_token.py
oleitao/ScriptsMegaCollection
2ec9fc0c1bdf6d81ce9bd16088a7abbd346f5b1e
[ "CC0-1.0" ]
null
null
null
PythonScript/obtain_access_token.py
oleitao/ScriptsMegaCollection
2ec9fc0c1bdf6d81ce9bd16088a7abbd346f5b1e
[ "CC0-1.0" ]
null
null
null
#https://github.com/reddit-archive/reddit/wiki/OAuth2-Quick-Start-Example import requests import requests.auth client_auth = requests.auth.HTTPBasicAuth('<REDDIT_CLIENT_ID>', '<REDDIT_CLIENT_SECRET>') post_data = {"grant_type": "password", "username": "<REDDIT_CLIENT_USERNAME>", "password": "<REDDIT_CLIENT_PASSWORD>"} headers = {"User-Agent": "ChangeMeClient/0.1 by YourUsername"} response = requests.post("https://www.reddit.com/api/v1/access_token", auth=client_auth, data=post_data, headers=headers) print(response.json())
58.666667
121
0.776515
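The script above stops at printing the token response. A hedged continuation, following the OAuth2 quickstart it links to, would read access_token out of that JSON and send it as a bearer token to the oauth.reddit.com host; the identity call below is only an example of such a follow-up request.

import requests

token = response.json()["access_token"]  # `response` is the object created by the script above
auth_headers = {"Authorization": "bearer " + token,
                "User-Agent": "ChangeMeClient/0.1 by YourUsername"}
# Authenticated calls go to oauth.reddit.com rather than www.reddit.com
me = requests.get("https://oauth.reddit.com/api/v1/me", headers=auth_headers)
print(me.json())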
2bc70726adaa48f5a2d6ae1bc2ef07773b7bd651
5,882
py
Python
tensorflow_probability/python/experimental/inference_gym/targets/logistic_regression.py
bourov/probability
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
[ "Apache-2.0" ]
2
2020-12-17T20:43:24.000Z
2021-06-11T22:09:16.000Z
tensorflow_probability/python/experimental/inference_gym/targets/logistic_regression.py
bourov/probability
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
[ "Apache-2.0" ]
2
2021-08-25T16:14:51.000Z
2022-02-10T04:47:11.000Z
tensorflow_probability/python/experimental/inference_gym/targets/logistic_regression.py
bourov/probability
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
[ "Apache-2.0" ]
1
2020-12-19T13:05:15.000Z
2020-12-19T13:05:15.000Z
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logistic regression models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.experimental.inference_gym.internal import data from tensorflow_probability.python.experimental.inference_gym.targets import bayesian_model from tensorflow_probability.python.experimental.inference_gym.targets import model __all__ = [ 'GermanCreditNumericLogisticRegression', 'LogisticRegression', ] def _add_bias(features): return tf.concat([features, tf.ones([tf.shape(features)[0], 1])], axis=-1) class LogisticRegression(bayesian_model.BayesianModel): """Bayesian logistic regression with a Gaussian prior.""" def __init__( self, train_features, train_labels, test_features=None, test_labels=None, name='logistic_regression', pretty_name='Logistic Regression', ): """Construct the logistic regression model. Args: train_features: Floating-point `Tensor` with shape `[num_train_points, num_features]`. Training features. train_labels: Integer `Tensor` with shape `[num_train_points]`. Training labels. test_features: Floating-point `Tensor` with shape `[num_test_points, num_features]`. Testing features. Can be `None`, in which case test-related sample transformations are not computed. test_labels: Integer `Tensor` with shape `[num_test_points]`. Testing labels. Can be `None`, in which case test-related sample transformations are not computed. name: Python `str` name prefixed to Ops created by this class. pretty_name: A Python `str`. The pretty name of this model. Raises: ValueError: If `test_features` and `test_labels` are either not both `None` or not both specified. """ with tf.name_scope(name): train_features = tf.convert_to_tensor(train_features, tf.float32) train_features = _add_bias(train_features) train_labels = tf.convert_to_tensor(train_labels) num_features = int(train_features.shape[1]) self._prior_dist = tfd.Sample(tfd.Normal(0., 1.), num_features) def log_likelihood_fn(weights, features, labels, reduce_sum=True): """The log_likelihood function.""" logits = tf.einsum('nd,...d->...n', features, weights) log_likelihood = tfd.Bernoulli(logits=logits).log_prob(labels) if reduce_sum: return tf.reduce_sum(log_likelihood, [-1]) else: return log_likelihood self._train_log_likelihood_fn = functools.partial( log_likelihood_fn, features=train_features, labels=train_labels) sample_transformations = { 'identity': model.Model.SampleTransformation( fn=lambda params: params, pretty_name='Identity', ) } if (test_features is not None) != (test_labels is not None): raise ValueError('`test_features` and `test_labels` must either both ' 'be `None` or both specified. 
Got: test_features={}, ' 'test_labels={}'.format(test_features, test_labels)) if test_features is not None and test_labels is not None: test_features = tf.convert_to_tensor(test_features, tf.float32) test_features = _add_bias(test_features) test_labels = tf.convert_to_tensor(test_labels) test_log_likelihood_fn = functools.partial( log_likelihood_fn, features=test_features, labels=test_labels) sample_transformations['test_nll'] = ( model.Model.SampleTransformation( fn=test_log_likelihood_fn, pretty_name='Test NLL', )) sample_transformations['per_example_test_nll'] = ( model.Model.SampleTransformation( fn=functools.partial(test_log_likelihood_fn, reduce_sum=False), pretty_name='Per-example Test NLL', )) super(LogisticRegression, self).__init__( default_event_space_bijector=tfb.Identity(), event_shape=self._prior_dist.event_shape, dtype=self._prior_dist.dtype, name=name, pretty_name=pretty_name, sample_transformations=sample_transformations, ) def _prior_distribution(self): return self._prior_dist def log_likelihood(self, value): return self._train_log_likelihood_fn(value) class GermanCreditNumericLogisticRegression(LogisticRegression): """Bayesian logistic regression with a Gaussian prior. This model uses the German Credit (numeric) data set [1]. #### References 1. https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data) """ def __init__(self): dataset = data.german_credit_numeric() del dataset['test_features'] del dataset['test_labels'] super(GermanCreditNumericLogisticRegression, self).__init__( name='german_credit_numeric_logistic_regression', pretty_name='German Credit Numeric Logistic Regression', **dataset)
37.464968
91
0.702822
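The log_likelihood_fn in the model above relies on tf.einsum('nd,...d->...n', features, weights) to broadcast an arbitrary batch of weight vectors against the [num_points, num_features] design matrix. A small shape-only sketch (sizes are arbitrary) shows the resulting logits shape.

import tensorflow as tf

features = tf.random.normal([7, 4])    # 7 data points, 3 features plus the appended bias column
weights = tf.random.normal([5, 2, 4])  # a [5, 2] batch of weight vectors
logits = tf.einsum('nd,...d->...n', features, weights)
print(logits.shape)  # (5, 2, 7): one logit per data point, per batch member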
3708acb5d5edd82fa02b2ec138a56c3a69281b10
8,615
py
Python
seirsplus/utilities.py
boazbk/seirsplus
040f167eed92cd947be2bd51749e95c5dc75a797
[ "MIT" ]
null
null
null
seirsplus/utilities.py
boazbk/seirsplus
040f167eed92cd947be2bd51749e95c5dc75a797
[ "MIT" ]
null
null
null
seirsplus/utilities.py
boazbk/seirsplus
040f167eed92cd947be2bd51749e95c5dc75a797
[ "MIT" ]
1
2021-05-27T06:42:31.000Z
2021-05-27T06:42:31.000Z
import sys import numpy import matplotlib.pyplot as pyplot def gamma_dist(mean, coeffvar, N): scale = mean*coeffvar**2 shape = mean/scale return numpy.random.gamma(scale=scale, shape=shape, size=N) def dist_info(dists, names=None, plot=False, bin_size=1, colors=None, reverse_plot=False): dists = [dists] if not isinstance(dists, list) else dists names = [names] if(names is not None and not isinstance(names, list)) else (names if names is not None else [None]*len(dists)) colors = [colors] if(colors is not None and not isinstance(colors, list)) else (colors if colors is not None else pyplot.rcParams['axes.prop_cycle'].by_key()['color']) for i, (dist, name) in enumerate(zip(dists, names)): print((name+": " if name else "")+" mean = %.2f, std = %.2f, 95%% CI = (%.2f, %.2f)" % (numpy.mean(dist), numpy.std(dist), numpy.percentile(dist, 2.5), numpy.percentile(dist, 97.5))) print() if(plot): pyplot.hist(dist, bins=numpy.arange(0, int(max(dist)+1), step=bin_size), label=(name if name else False), color=colors[i], edgecolor='white', alpha=0.6, zorder=(-1*i if reverse_plot else i)) if(plot): pyplot.ylabel('num nodes') pyplot.legend(loc='upper right') pyplot.show() def network_info(networks, names=None, plot=False, bin_size=1, colors=None, reverse_plot=False): import networkx networks = [networks] if not isinstance(networks, list) else networks names = [names] if not isinstance(names, list) else names colors = [colors] if(colors is not None and not isinstance(colors, list)) else (colors if colors is not None else pyplot.rcParams['axes.prop_cycle'].by_key()['color']) for i, (network, name) in enumerate(zip(networks, names)): degree = [d[1] for d in network.degree()] if(name): print(name+":") print("Degree: mean = %.2f, std = %.2f, 95%% CI = (%.2f, %.2f)\n coeff var = %.2f" % (numpy.mean(degree), numpy.std(degree), numpy.percentile(degree, 2.5), numpy.percentile(degree, 97.5), numpy.std(degree)/numpy.mean(degree))) r = networkx.degree_assortativity_coefficient(network) print("Assortativity: %.2f" % (r)) c = networkx.average_clustering(network) print("Clustering coeff: %.2f" % (c)) print() if(plot): pyplot.hist(degree, bins=numpy.arange(0, int(max(degree)+1), step=bin_size), label=(name+" degree" if name else False), color=colors[i], edgecolor='white', alpha=0.6, zorder=(-1*i if reverse_plot else i)) if(plot): pyplot.ylabel('num nodes') pyplot.legend(loc='upper right') pyplot.show() def results_summary(model): print("total percent infected: %0.2f%%" % ((model.total_num_infected()[-1]+model.total_num_recovered()[-1])/model.numNodes * 100) ) print("total percent fatality: %0.2f%%" % (model.numF[-1]/model.numNodes * 100) ) print("peak pct hospitalized: %0.2f%%" % (numpy.max(model.numH)/model.numNodes * 100) ) ######################################################################################################################################### try: import pandas as pd import seaborn as sns import matplotlib.pyplot as plt def last(x): """Return last element of a pandas Series""" return x.iloc[-1] def summarize(df): """Return a Series with last value, sum of values, and weighted average of values""" temp = df.copy() orig_cols = list(df.columns) todrop = [] for col in orig_cols: if col == 'time': continue tcol = temp[['time',col]].dropna() lengths = (tcol['time'] - tcol['time'].shift(1)).fillna(0) total = sum(lengths) temp = temp.assign(**{col+"/scaled": tcol[col] * lengths / total if total else 0}) todrop.append(col+"/scaled/last") temp = temp.fillna(0) summary = temp.agg([last, numpy.sum]) summary = 
summary.stack() summary.index = ['/'.join(reversed(col)).strip() for col in summary.index.values] summary.drop(todrop,inplace=True) summary.rename({col+"/scaled/sum": col+"/average" for col in orig_cols},inplace=True) return summary def make_compact(val): """Take a potentially object and reduce it to smaller for logging. If the object is number - return it If the object is a long list or ndarray of numbers - return its average Otherwise, stringify it If the object is a string - return its first 30 characters """ if isinstance(val, (int, float, numpy.number)): return val if isinstance(val, numpy.ndarray): return val.flatten().mean() if isinstance(val, (list, tuple)): if len(val)<5: return str(val) if isinstance(val[0], (int, float, numpy.number)): return sum(val) / len(val) return str(val)[:64] def hist2df(history , **kwargs): """Take history dictionary and return: pandas DataFrame of all history pandas Series of the summary of history, taking the last value and the sum, as well average over time (sum of scaled) Optional kwargs argument - if given then add them to the dataFrame and DataSeries - helpful when merging many logs from different runs. """ L = [{'time': t, **d} for t, d in history.items()] n = len(L) df = pd.DataFrame(L) if 'numPositive' in df.columns: df['overallPositive'] = df['numPositive'].fillna(0).cumsum() summary = summarize(df) # add to summary statistics up to first detection test_lag = 0 if 'test_lag' in kwargs: test_lag = kwargs['test_lag'] else: for t,d in history.items(): if 'isolation_lag_positive' in d: test_lag = d['isolation_lag_positive'] break detectionTime = -1 firstPositiveTestTime = -1 temp = df[df.numPositive>0] row = None if len(temp)>0: firstPositiveTestTime = temp['time'].iloc[0] detectionTime = firstPositiveTestTime + test_lag temp = df[df['time']<= detectionTime] if len(temp): summary2 = summarize(temp) summary2.rename({col: col+"/1st" for col in summary2.index }, inplace=True) summary = summary.append(summary2) summary = summary.append(pd.Series([firstPositiveTestTime, test_lag, detectionTime], index= ['firstPositiveTestTime', 'test_lag', 'detectionTime'])) if kwargs: for key,val in kwargs.items(): val = make_compact(val) df[key] = val summary[key] = val return df, summary def violin_plot(lists, labels, title="", ylabel="", xlabel=""): sns.set() fig, ax = plt.subplots(figsize=(16, 8)) vp = ax.violinplot(lists, showmeans=True) i = 1 for pc in vp['bodies']: pc.set_color(f'C{i}') for partname in ('cbars', 'cmins', 'cmaxes', 'cmeans'): pc = vp[partname] pc.set_edgecolor("black") pc.set_linewidth(1) ax.get_xaxis().set_tick_params(direction='out') ax.xaxis.set_ticks_position('bottom') ax.set_xticks(numpy.arange(1, len(labels) + 1)) ax.set_xticklabels(labels, rotation=45, ha='right') ax.set_xlim(0.25, len(labels) + 0.75) ax.set_ylabel(ylabel) if xlabel: ax.set_xlabel(xlabel) if title: ax.set_title(title) plt.show() def show_violins(data, field, groupby = 'variant', ylabel=None,title="", key = None): """Show 'violin graphs' of a certain field according to different variants""" plots = [] labels = [] if ylabel is None: ylabel = field for v in sorted(data[groupby].unique(), key=key): plots.append(data[data[groupby] == v][field]) labels.append(v) violin_plot(plots, labels, ylabel=ylabel, title=title) except ImportError: print("Warning: pandas missing - some logging functions will not work", file=sys.stderr) def last(x): raise NotImplementedError("This function requires pandas to work") def hist2df(history): raise NotImplementedError("This function requires pandas 
to work")
38.9819
216
0.586651
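gamma_dist in the file above re-parameterizes numpy's gamma distribution in terms of a mean and a coefficient of variation (scale = mean*coeffvar**2, shape = mean/scale). A quick sanity-check sketch with arbitrary target values confirms the samples come back with roughly those statistics.

import numpy

mean, coeffvar, N = 5.0, 0.6, 200000
scale = mean * coeffvar ** 2   # same mapping as gamma_dist above
shape = mean / scale
dist = numpy.random.gamma(scale=scale, shape=shape, size=N)
print(numpy.mean(dist))                    # approximately 5.0
print(numpy.std(dist) / numpy.mean(dist))  # approximately 0.6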
d2afdcdf348b346003031b19b7ad92ffebb3c6bd
3,785
py
Python
io_scene_halo/file_qua/import_qua.py
AerialDave144/Halo-Asset-Blender-Development-Toolset
f1b0c0b22806ebabaf0126ad864896193c02307f
[ "MIT" ]
36
2020-11-29T04:36:19.000Z
2022-03-16T22:54:45.000Z
io_scene_halo/file_qua/import_qua.py
AerialDave144/Halo-Asset-Blender-Development-Toolset
f1b0c0b22806ebabaf0126ad864896193c02307f
[ "MIT" ]
18
2020-05-24T07:07:55.000Z
2020-08-24T20:34:14.000Z
io_scene_halo/file_qua/import_qua.py
AerialDave144/Halo-Asset-Blender-Development-Toolset
f1b0c0b22806ebabaf0126ad864896193c02307f
[ "MIT" ]
15
2020-12-02T13:28:52.000Z
2022-03-12T00:14:10.000Z
# ##### BEGIN MIT LICENSE BLOCK ##### # # MIT License # # Copyright (c) 2021 Steven Garcia # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # ##### END MIT LICENSE BLOCK ##### import bpy from ..global_functions import global_functions class QUAAsset(global_functions.HaloAsset): class Scene: def __init__(self, version, name): self.version = version self.name = name class Units: def __init__(self, name, path, bit_0, bit_1, bit_2): self.name = name self.path = path self.bit_0 = bit_0 self.bit_1 = bit_1 self.bit_2 = bit_2 class Scenery: def __init__(self, name, path, bit_0, bit_1, bit_2): self.name = name self.path = path self.bit_0 = bit_0 self.bit_1 = bit_1 self.bit_2 = bit_2 class EffectsScenery: def __init__(self, name, path, bit_0, bit_1, bit_2): self.name = name self.path = path self.bit_0 = bit_0 self.bit_1 = bit_1 self.bit_2 = bit_2 class Shots: def __init__(self, frames, audio_data): self.frames = frames self.audio_data = audio_data class ExtraShots: def __init__(self, frames, audio_data): self.frames = frames self.audio_data = audio_data class Frames: def __init__(self, position, up, forward, fov, aperture, focal_length, depth_of_field, near_focal, far_focal, focal_depth, blur_amount): self.position = position self.up = up self.forward = forward self.fov = fov self.aperture = aperture self.focal_length = focal_length self.depth_of_field = depth_of_field self.near_focal = near_focal self.far_focal = far_focal self.focal_depth = focal_depth self.blur_amount = blur_amount class AudioData: def __init__(self, filepath, frame, name): self.filepath = filepath self.frame = frame self.name = name def __init__(self, context): self.version = 5 self.name = "placeholder" self.shots = [] self.units = [] self.scenery = [] self.effects_scenery = [] self.extra_cameras = [] self.extra_shots = [] if self.left() != 0: # is something wrong with the parser? raise RuntimeError("%s elements left after parse end" % self.left()) def load_file(context, filepath, report): ass_file = QUAAsset(filepath) report({'INFO'}, "Import completed successfully") return {'FINISHED'} if __name__ == '__main__': bpy.ops.import_scene.qua()
33.495575
144
0.632232
703be92d46c87c14d74b34df90451bf53ce7a31d
419
py
Python
haasomeapi/dataobjects/custombots/dataobjects/CryptoIndexBotIndexResult.py
iamcos/haasomeapi
eac1640cc13e1e7649b8a8d6ed88184722c907c8
[ "MIT" ]
9
2018-07-08T22:40:53.000Z
2022-03-21T20:32:43.000Z
haasomeapi/dataobjects/custombots/dataobjects/CryptoIndexBotIndexResult.py
iamcos/haasomeapi
eac1640cc13e1e7649b8a8d6ed88184722c907c8
[ "MIT" ]
5
2018-08-25T11:48:05.000Z
2019-12-12T19:57:20.000Z
haasomeapi/dataobjects/custombots/dataobjects/CryptoIndexBotIndexResult.py
iamcos/haasomeapi
eac1640cc13e1e7649b8a8d6ed88184722c907c8
[ "MIT" ]
6
2018-08-31T23:49:36.000Z
2022-01-08T04:51:21.000Z
class CryptoIndexBotIndexResult: """ Data Object containing a Crypto Index Bot Index Result :ivar coin: str: :ivar inWallet: float: :ivar indexValue: float: :ivar deactivated: bool: :ivar targetPercentage: float: :ivar currentPercentage: float: """ coin: str inWallet: float indexValue: float deactivated: bool targetPercentage: float currentPercentage: float
22.052632
62
0.682578
357e4f2aa26809c6492fa206d68518ffce49263f
3,484
py
Python
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/list_triggers_details_request.py
wuchen-huawei/huaweicloud-sdk-python-v3
3683d703f4320edb2b8516f36f16d485cff08fc2
[ "Apache-2.0" ]
1
2021-11-03T07:54:50.000Z
2021-11-03T07:54:50.000Z
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/list_triggers_details_request.py
wuchen-huawei/huaweicloud-sdk-python-v3
3683d703f4320edb2b8516f36f16d485cff08fc2
[ "Apache-2.0" ]
null
null
null
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/list_triggers_details_request.py
wuchen-huawei/huaweicloud-sdk-python-v3
3683d703f4320edb2b8516f36f16d485cff08fc2
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 import pprint import re import six class ListTriggersDetailsRequest: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'namespace': 'str', 'repository': 'str' } attribute_map = { 'namespace': 'namespace', 'repository': 'repository' } def __init__(self, namespace=None, repository=None): """ListTriggersDetailsRequest - a model defined in huaweicloud sdk""" self._namespace = None self._repository = None self.discriminator = None self.namespace = namespace self.repository = repository @property def namespace(self): """Gets the namespace of this ListTriggersDetailsRequest. 组织名称 :return: The namespace of this ListTriggersDetailsRequest. :rtype: str """ return self._namespace @namespace.setter def namespace(self, namespace): """Sets the namespace of this ListTriggersDetailsRequest. 组织名称 :param namespace: The namespace of this ListTriggersDetailsRequest. :type: str """ self._namespace = namespace @property def repository(self): """Gets the repository of this ListTriggersDetailsRequest. 镜像仓库名称 :return: The repository of this ListTriggersDetailsRequest. :rtype: str """ return self._repository @repository.setter def repository(self, repository): """Sets the repository of this ListTriggersDetailsRequest. 镜像仓库名称 :param repository: The repository of this ListTriggersDetailsRequest. :type: str """ self._repository = repository def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ListTriggersDetailsRequest): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
25.617647
77
0.561137
2cb76e913c727638b4039c461b158231888b0e32
8,814
py
Python
examples/common/python/connectors/direct/worker_registry_jrpc_impl.py
AvalonRelease5/avalon
0bd78b907fba0896c512678b5b560c3f358e787c
[ "Apache-2.0" ]
null
null
null
examples/common/python/connectors/direct/worker_registry_jrpc_impl.py
AvalonRelease5/avalon
0bd78b907fba0896c512678b5b560c3f358e787c
[ "Apache-2.0" ]
null
null
null
examples/common/python/connectors/direct/worker_registry_jrpc_impl.py
AvalonRelease5/avalon
0bd78b907fba0896c512678b5b560c3f358e787c
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging from utility.hex_utils import is_valid_hex_str from service_client.generic import GenericServiceClient from connectors.interfaces.worker_registry_interface import WorkerRegistryInterface from utility.tcf_types import WorkerType, JsonRpcErrorCode from connectors.utils import create_jrpc_response,validate_details logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO) class WorkerRegistryJRPCImpl(WorkerRegistryInterface): def __init__(self, config): self.__uri_client = GenericServiceClient(config["tcf"]["json_rpc_uri"]) def worker_register(self, worker_id, worker_type, org_id, application_type_ids, details, id=None): """ Adds worker details to registry """ if worker_id is None or not is_valid_hex_str(worker_id): logging.error("Worker id is empty or Invalid") return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, "Worker id is empty or Invalid") if not isinstance(worker_type, WorkerType): logging.error("Invalid worker type") return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid worker type") if org_id is not None and not is_valid_hex_str(org_id): logging.error("Invalid organization id") return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid organization id") if application_type_ids is not None: for app_id in application_type_ids: if not is_valid_hex_str(app_id): logging.error("Invalid application type id") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid application type id") if details is not None: is_valid = validate_details(details) if is_valid is not None: logging.error(is_valid) return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, is_valid) json_rpc_request = { "jsonrpc": "2.0", "method": "WorkerRegister", "id": id, "params": { "workerId": worker_id, "workerType": worker_type.value, "organizationId": org_id, "applicationTypeId": application_type_ids, "details": json.loads(details) } } response = self.__uri_client._postmsg(json.dumps(json_rpc_request)) return response def worker_update(self, worker_id, details, id=None): """ Update worker with new information """ if worker_id is None or not is_valid_hex_str(worker_id): logging.error("Worker id is empty or invalid") return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, "Worker id is empty or Invalid") json_rpc_request = { "jsonrpc": "2.0", "method": "WorkerUpdate", "id": id, "params": { "workerId": worker_id, "details": details } } response = self.__uri_client._postmsg(json.dumps(json_rpc_request)) return response def worker_set_status(self, worker_id, status, id=None): """ Set the worker status to active, offline, decommissioned or compromised state """ if worker_id is None or not is_valid_hex_str(worker_id): logging.error("Worker id is empty or Invalid") return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, "Worker id is empty or Invalid") json_rpc_request = { "jsonrpc": "2.0", 
"method": "WorkerSetStatus", "id": id, "params": { "workerId": worker_id, "status": status.value } } response = self.__uri_client._postmsg(json.dumps(json_rpc_request)) return response def worker_retrieve(self, worker_id, id=None): """ Retrieve the worker identified by worker id """ if worker_id is None or not is_valid_hex_str(worker_id): logging.error("Worker id is empty or Invalid") return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER, "Worker id is empty or Invalid") json_rpc_request = { "jsonrpc": "2.0", "method": "WorkerRetrieve", "id": id, "params": { "workerId": worker_id } } response = self.__uri_client._postmsg(json.dumps(json_rpc_request)) return response def worker_lookup(self, worker_type=None, organization_id=None, application_type_id=None, id=None): """ Worker lookup based on worker type, organization id and application id""" json_rpc_request = { "jsonrpc": "2.0", "method": "WorkerLookUp", "id": id, "params": { } } if worker_type is not None: if not isinstance(worker_type, WorkerType): logging.error("Invalid worker type") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid worker type") json_rpc_request["params"]["workerType"] = worker_type.value if organization_id is not None: if not is_valid_hex_str(organization_id): logging.error("Invalid organization id") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid organization id") json_rpc_request["params"]["organizationId"] = organization_id if application_type_id is not None: for app_id in application_type_id: if not is_valid_hex_str(app_id): logging.error("Invalid application type id") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid application type id") json_rpc_request["params"]["applicationTypeId"] = application_type_id response = self.__uri_client._postmsg(json.dumps(json_rpc_request)) return response def worker_lookup_next(self, lookup_tag, worker_type=None, organization_id=None, application_type_id=None, id=None): """ Similar to workerLookUp with additional parameter lookup_tag """ json_rpc_request = { "jsonrpc": "2.0", "method": "WorkerLookUpNext", "id": id, "params": { "lookUpTag": lookup_tag } } if worker_type is not None: if not isinstance(worker_type, WorkerType): logging.error("Invalid worker type2") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid worker type") json_rpc_request["params"]["workerType"] = worker_type.value if organization_id is not None: if not is_valid_hex_str(organization_id): logging.error("Invalid organization id") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid organization id") json_rpc_request["params"]["organizationId"] = organization_id if application_type_id is not None: for app_id in application_type_id: if not is_valid_hex_str(app_id): logging.error("Invalid application type id") return create_jrpc_response( id, JsonRpcErrorCode.INVALID_PARAMETER, "Invalid application type id") json_rpc_request["params"]["applicationTypeId"] = application_type_id response = self.__uri_client._postmsg(json.dumps(json_rpc_request)) return response
41.380282
91
0.605854
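For reference, worker_register in the file above serializes its arguments into a JSON-RPC 2.0 envelope before POSTing it to the configured json_rpc_uri. The sketch below shows the shape of that payload with entirely made-up values; the worker id, organization id, application type id, and details content are illustrative placeholders, not values from any real deployment.

# Shape of the request built by worker_register above; every value is a placeholder.
example_payload = {
    "jsonrpc": "2.0",
    "method": "WorkerRegister",
    "id": 11,
    "params": {
        "workerId": "0x1234abcd",                    # must pass is_valid_hex_str
        "workerType": 1,                             # a WorkerType enum value
        "organizationId": "0xaabbccdd",
        "applicationTypeId": ["0x11223344"],         # note: key is singular in the code above
        "details": {"hashingAlgorithm": "SHA-256"},  # illustrative details content
    },
}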
7efab3e073dfd74b97a4c90023f7a92c4914b053
5,802
py
Python
django/contrib/gis/db/backends/spatialite/creation.py
kennethlove/django
18aed863b46e719d7efaeab624fb8aa4cd62f360
[ "BSD-3-Clause" ]
1
2016-05-09T02:41:07.000Z
2016-05-09T02:41:07.000Z
django/contrib/gis/db/backends/spatialite/creation.py
kennethlove/django
18aed863b46e719d7efaeab624fb8aa4cd62f360
[ "BSD-3-Clause" ]
null
null
null
django/contrib/gis/db/backends/spatialite/creation.py
kennethlove/django
18aed863b46e719d7efaeab624fb8aa4cd62f360
[ "BSD-3-Clause" ]
null
null
null
import os from django.conf import settings from django.core.cache import get_cache from django.core.cache.backends.db import BaseDatabaseCache from django.core.exceptions import ImproperlyConfigured from django.db.backends.sqlite3.creation import DatabaseCreation class SpatiaLiteCreation(DatabaseCreation): def create_test_db(self, verbosity=1, autoclobber=False): """ Creates a test database, prompting the user for confirmation if the database already exists. Returns the name of the test database created. This method is overloaded to load up the SpatiaLite initialization SQL prior to calling the `syncdb` command. """ # Don't import django.core.management if it isn't needed. from django.core.management import call_command test_database_name = self._get_test_db_name() if verbosity >= 1: test_db_repr = '' if verbosity >= 2: test_db_repr = " ('%s')" % test_database_name print("Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)) self._create_test_db(verbosity, autoclobber) self.connection.close() self.connection.settings_dict["NAME"] = test_database_name # Confirm the feature set of the test database self.connection.features.confirm() # Need to load the SpatiaLite initialization SQL before running `syncdb`. self.load_spatialite_sql() # Report syncdb messages at one level lower than that requested. # This ensures we don't get flooded with messages during testing # (unless you really ask to be flooded) call_command('syncdb', verbosity=max(verbosity - 1, 0), interactive=False, database=self.connection.alias, load_initial_data=False) # We need to then do a flush to ensure that any data installed by # custom SQL has been removed. The only test data should come from # test fixtures, or autogenerated from post_syncdb triggers. # This has the side effect of loading initial data (which was # intentionally skipped in the syncdb). call_command('flush', verbosity=max(verbosity - 1, 0), interactive=False, database=self.connection.alias) from django.core.cache import get_cache from django.core.cache.backends.db import BaseDatabaseCache for cache_alias in settings.CACHES: cache = get_cache(cache_alias) if isinstance(cache, BaseDatabaseCache): call_command('createcachetable', cache._table, database=self.connection.alias) # Get a cursor (even though we don't need one yet). This has # the side effect of initializing the test database. cursor = self.connection.cursor() return test_database_name def sql_indexes_for_field(self, model, f, style): "Return any spatial index creation SQL for the field." from django.contrib.gis.db.models.fields import GeometryField output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style) if isinstance(f, GeometryField): gqn = self.connection.ops.geo_quote_name qn = self.connection.ops.quote_name db_table = model._meta.db_table output.append(style.SQL_KEYWORD('SELECT ') + style.SQL_TABLE('AddGeometryColumn') + '(' + style.SQL_TABLE(gqn(db_table)) + ', ' + style.SQL_FIELD(gqn(f.column)) + ', ' + style.SQL_FIELD(str(f.srid)) + ', ' + style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' + style.SQL_KEYWORD(str(f.dim)) + ', ' + style.SQL_KEYWORD(str(int(not f.null))) + ');') if f.spatial_index: output.append(style.SQL_KEYWORD('SELECT ') + style.SQL_TABLE('CreateSpatialIndex') + '(' + style.SQL_TABLE(gqn(db_table)) + ', ' + style.SQL_FIELD(gqn(f.column)) + ');') return output def load_spatialite_sql(self): """ This routine loads up the SpatiaLite SQL file. 
""" if self.connection.ops.spatial_version[:2] >= (3, 0): # Spatialite >= 3.0.x -- No need to load any SQL file, calling # InitSpatialMetaData() transparently creates the spatial metadata # tables cur = self.connection._cursor() cur.execute("SELECT InitSpatialMetaData()") else: # Spatialite < 3.0.x -- Load the initial SQL # Getting the location of the SpatiaLite SQL file, and confirming # it exists. spatialite_sql = self.spatialite_init_file() if not os.path.isfile(spatialite_sql): raise ImproperlyConfigured('Could not find the required SpatiaLite initialization ' 'SQL file (necessary for testing): %s' % spatialite_sql) # Opening up the SpatiaLite SQL initialization file and executing # as a script. with open(spatialite_sql, 'r') as sql_fh: cur = self.connection._cursor() cur.executescript(sql_fh.read()) def spatialite_init_file(self): # SPATIALITE_SQL may be placed in settings to tell GeoDjango # to use a specific path to the SpatiaLite initilization SQL. return getattr(settings, 'SPATIALITE_SQL', 'init_spatialite-%s.%s.sql' % self.connection.ops.spatial_version[:2])
43.62406
103
0.609962
fbdfaff055df9d41601e93876666382e25abe2ad
1,765
py
Python
crx_python_webdriver/basetestcase.py
carn1x/crx-python-webdriver
913d23c17906852583d957f4b96d4d93e53494de
[ "MIT" ]
1
2015-02-26T01:49:25.000Z
2015-02-26T01:49:25.000Z
crx_python_webdriver/basetestcase.py
carn1x/crx-python-webdriver
913d23c17906852583d957f4b96d4d93e53494de
[ "MIT" ]
null
null
null
crx_python_webdriver/basetestcase.py
carn1x/crx-python-webdriver
913d23c17906852583d957f4b96d4d93e53494de
[ "MIT" ]
null
null
null
import unittest import sys import os from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException from conf import settings def make_blocking(fd): import fcntl flags = fcntl.fcntl(fd, fcntl.F_GETFL) if flags & os.O_NONBLOCK: fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) class BaseTestCase(unittest.TestCase): def setUp(self): try: make_blocking(sys.stdin.fileno()) make_blocking(sys.stdout.fileno()) except ImportError: pass self.driver = settings.DRIVER() self.driver.implicitly_wait(settings.IMPLICITLY_WAIT) self.base_url = settings.BASE_URL self.verification_errors = [] self.accept_next_alert = True def is_element_present(self, how, what): try: self.driver.find_element(by=how, value=what) except NoSuchElementException: return False return True def is_alert_present(self): try: self.driver.switch_to.alert() except NoAlertPresentException: return False return True def close_alert_and_get_its_text(self): try: alert = self.driver.switch_to.alert() alert_text = alert.text if self.accept_next_alert: alert.accept() else: alert.dismiss() return alert_text finally: self.accept_next_alert = True def tearDown(self): self.driver.quit() self.assertEqual([], self.verification_errors) def navigate_to(self, path=None): path = path or '' self.driver.get(self.base_url + "/" + path) if __name__ == "__main__": unittest.main()
25.214286
86
0.616431
b4d547bcb3ea33068033f1662dae87657bd198fe
6,841
py
Python
scons/scons-local-2.3.3/SCons/Tool/mwcc.py
pedrishi/pdb2pqr_pypka
74f64948658d021a8bfc8fd78936ce4186ffc88e
[ "BSD-3-Clause" ]
null
null
null
scons/scons-local-2.3.3/SCons/Tool/mwcc.py
pedrishi/pdb2pqr_pypka
74f64948658d021a8bfc8fd78936ce4186ffc88e
[ "BSD-3-Clause" ]
null
null
null
scons/scons-local-2.3.3/SCons/Tool/mwcc.py
pedrishi/pdb2pqr_pypka
74f64948658d021a8bfc8fd78936ce4186ffc88e
[ "BSD-3-Clause" ]
null
null
null
"""SCons.Tool.mwcc Tool-specific initialization for the Metrowerks CodeWarrior compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/mwcc.py 2014/08/24 12:12:31 garyo" import os import os.path import SCons.Util def set_vars(env): """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars MWCW_VERSIONS is set to a list of objects representing installed versions MWCW_VERSION is set to the version object that will be used for building. MWCW_VERSION can be set to a string during Environment construction to influence which version is chosen, otherwise the latest one from MWCW_VERSIONS is used. Returns true if at least one version is found, false otherwise """ desired = env.get('MWCW_VERSION', '') # return right away if the variables are already set if isinstance(desired, MWVersion): return 1 elif desired is None: return 0 versions = find_versions() version = None if desired: for v in versions: if str(v) == desired: version = v elif versions: version = versions[-1] env['MWCW_VERSIONS'] = versions env['MWCW_VERSION'] = version if version is None: return 0 env.PrependENVPath('PATH', version.clpath) env.PrependENVPath('PATH', version.dllpath) ENV = env['ENV'] ENV['CWFolder'] = version.path ENV['LM_LICENSE_FILE'] = version.license plus = lambda x: '+%s' % x ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes)) ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs)) return 1 def find_versions(): """Return a list of MWVersion objects representing installed versions""" versions = [] ### This function finds CodeWarrior by reading from the registry on ### Windows. 
Some other method needs to be implemented for other ### platforms, maybe something that calls env.WhereIs('mwcc') if SCons.Util.can_read_reg: try: HLM = SCons.Util.HKEY_LOCAL_MACHINE product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions' product_key = SCons.Util.RegOpenKeyEx(HLM, product) i = 0 while True: name = product + '\\' + SCons.Util.RegEnumKey(product_key, i) name_key = SCons.Util.RegOpenKeyEx(HLM, name) try: version = SCons.Util.RegQueryValueEx(name_key, 'VERSION') path = SCons.Util.RegQueryValueEx(name_key, 'PATH') mwv = MWVersion(version[0], path[0], 'Win32-X86') versions.append(mwv) except SCons.Util.RegError: pass i = i + 1 except SCons.Util.RegError: pass return versions class MWVersion(object): def __init__(self, version, path, platform): self.version = version self.path = path self.platform = platform self.clpath = os.path.join(path, 'Other Metrowerks Tools', 'Command Line Tools') self.dllpath = os.path.join(path, 'Bin') # The Metrowerks tools don't store any configuration data so they # are totally dumb when it comes to locating standard headers, # libraries, and other files, expecting all the information # to be handed to them in environment variables. The members set # below control what information scons injects into the environment ### The paths below give a normal build environment in CodeWarrior for ### Windows, other versions of CodeWarrior might need different paths. msl = os.path.join(path, 'MSL') support = os.path.join(path, '%s Support' % platform) self.license = os.path.join(path, 'license.dat') self.includes = [msl, support] self.libs = [msl, support] def __str__(self): return self.version CSuffixes = ['.c', '.C'] CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++'] def generate(env): """Add Builders and construction variables for the mwcc to an Environment.""" import SCons.Defaults import SCons.Tool set_vars(env) static_obj, shared_obj = SCons.Tool.createObjBuilders(env) for suffix in CSuffixes: static_obj.add_action(suffix, SCons.Defaults.CAction) shared_obj.add_action(suffix, SCons.Defaults.ShCAction) for suffix in CXXSuffixes: static_obj.add_action(suffix, SCons.Defaults.CXXAction) shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction) env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES' env['CC'] = 'mwcc' env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS' env['CXX'] = 'mwcc' env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS' env['SHCC'] = '$CC' env['SHCCFLAGS'] = '$CCFLAGS' env['SHCFLAGS'] = '$CFLAGS' env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS' env['SHCXX'] = '$CXX' env['SHCXXFLAGS'] = '$CXXFLAGS' env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS' env['CFILESUFFIX'] = '.c' env['CXXFILESUFFIX'] = '.cpp' env['CPPDEFPREFIX'] = '-D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '-I' env['INCSUFFIX'] = '' #env['PCH'] = ? #env['PCHSTOP'] = ? def exists(env): return set_vars(env) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
32.889423
91
0.64742
c6e0506a01cd2b4d8d9f20361218211f5faba7ab
19,851
py
Python
data/projects/pypara/tests/test_monetary_money.py
se2p/artifact-pynguin-ssbse2020
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
[ "CC-BY-4.0" ]
3
2020-08-20T10:27:13.000Z
2021-11-02T20:28:16.000Z
data/projects/pypara/tests/test_monetary_money.py
se2p/artifact-pynguin-ssbse2020
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
[ "CC-BY-4.0" ]
null
null
null
data/projects/pypara/tests/test_monetary_money.py
se2p/artifact-pynguin-ssbse2020
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
[ "CC-BY-4.0" ]
null
null
null
import datetime from decimal import Decimal import pytest # type: ignore from pypara.currencies import Currencies from pypara.monetary import IncompatibleCurrencyError, Money, NoMoney, NoneMoney, Price, SomeMoney ## Define some currencies: eur = Currencies["EUR"] usd = Currencies["USD"] ## Defines some Decimal quantities: zero = Decimal("0") half = Decimal("0.5") one = Decimal("1") two = Decimal("2") ## Define some dates: today = datetime.date.today() yesterday = today - datetime.timedelta(days=1) def test_implementation() -> None: ## Define instances: _money = Money() # type: ignore smoney = SomeMoney(usd, one, today) nmoney = NoneMoney() ## Check structure: assert _money.__slots__ == () assert smoney.__slots__ == () assert nmoney.__slots__ == () assert not hasattr(_money, "__dict__") assert not hasattr(smoney, "__dict__") assert not hasattr(nmoney, "__dict__") ## Check types assert isinstance(Money.NA, Money) assert isinstance(Money.NA, NoneMoney) assert not isinstance(Money.NA, SomeMoney) assert not isinstance(Money.NA, Price) assert isinstance(_money, Money) assert not isinstance(_money, SomeMoney) assert not isinstance(_money, NoneMoney) assert isinstance(smoney, Money) assert isinstance(smoney, SomeMoney) assert not isinstance(smoney, NoneMoney) assert isinstance(nmoney, Money) assert not isinstance(nmoney, SomeMoney) assert isinstance(nmoney, NoneMoney) def test_of() -> None: assert Money.of(usd, one, None) == Money.NA assert Money.of(usd, None, today) == Money.NA assert Money.of(usd, one, None) == Money.NA assert Money.of(usd, one, today) == SomeMoney(usd, one, today) assert Money.of(usd, Decimal("0.055"), today) == Money.of(usd, Decimal("0.06"), today) assert Money.of(usd, Decimal("0.045"), today) == Money.of(usd, Decimal("0.04"), today) def test_is_equal() -> None: ## Vanilla: assert Money.NA.is_equal(NoMoney) assert Money.NA.is_equal(NoneMoney()) assert not Money.NA.is_equal(Money.of(usd, zero, today)) assert Money.of(usd, zero, today).is_equal(Money.of(usd, zero, today)) assert Money.of(usd, half, today).is_equal(Money.of(usd, half, today)) assert not Money.of(usd, zero, today).is_equal(Money.of(eur, zero, today)) assert not Money.of(usd, zero, today).is_equal(Money.of(usd, half, today)) assert not Money.of(usd, zero, today).is_equal(Money.of(usd, zero, yesterday)) ## With operator overload: assert Money.NA == NoneMoney() assert Money.NA != Money.of(usd, zero, today) assert Money.of(usd, zero, today) == Money.of(usd, zero, today) assert Money.of(usd, half, today) == Money.of(usd, half, today) assert Money.of(usd, zero, today) != Money.of(eur, zero, today) assert Money.of(usd, zero, today) != Money.of(usd, half, today) assert Money.of(usd, zero, today) != Money.of(usd, zero, yesterday) def test_to_boolean() -> None: ## Vanilla: assert not Money.NA.as_boolean() assert not Money.of(usd, zero, today).as_boolean() assert Money.of(usd, half, today).as_boolean() assert Money.of(usd, -half, today).as_boolean() ## With semantic overload assert not bool(Money.NA) assert not Money.of(usd, zero, today) assert Money.of(usd, half, today) assert Money.of(usd, -half, today) def test_to_float() -> None: ## Vanilla: with pytest.raises(TypeError): Money.NA.as_float() assert Money.of(usd, half, today).as_float() == 0.5 assert type(Money.of(usd, half, today).as_float()) == float ## With overload: with pytest.raises(TypeError): float(Money.NA) assert float(Money.of(usd, half, today)) == 0.5 assert type(float(Money.of(usd, half, today))) == float def test_to_integer() -> None: ## Vanilla: with 
pytest.raises(TypeError): int(Money.NA) assert int(Money.of(usd, half, today)) == 0 assert type(int(Money.of(usd, half, today))) == int ## With overload: with pytest.raises(TypeError): Money.NA.as_integer() assert Money.of(usd, half, today).as_integer() == 0 assert type(Money.of(usd, half, today).as_integer()) == int def test_abs() -> None: ## Vanilla: assert Money.NA.abs() == Money.NA assert Money.of(usd, zero, today).abs() == Money.of(usd, zero, today) assert Money.of(usd, -one, today).abs() == Money.of(usd, +one, today) assert Money.of(usd, +one, today).abs() == Money.of(usd, +one, today) ## With overload: assert abs(Money.NA) == Money.NA assert abs(Money.of(usd, zero, today)) == Money.of(usd, zero, today) assert abs(Money.of(usd, -one, today)) == Money.of(usd, +one, today) assert abs(Money.of(usd, +one, today)) == Money.of(usd, +one, today) def test_negative() -> None: ## Vanilla: assert Money.NA.negative() == Money.NA assert Money.of(usd, zero, today).negative() == Money.of(usd, zero, today) assert Money.of(usd, -one, today).negative() == Money.of(usd, +one, today) assert Money.of(usd, +one, today).negative() == Money.of(usd, -one, today) ## With overload: assert -Money.NA == Money.NA assert -Money.of(usd, zero, today) == Money.of(usd, zero, today) assert -Money.of(usd, -one, today) == Money.of(usd, +one, today) assert -Money.of(usd, +one, today) == Money.of(usd, -one, today) def test_positive() -> None: ## Vanilla: assert Money.NA.positive() == Money.NA assert Money.of(usd, zero, today).positive() == Money.of(usd, zero, today) assert Money.of(usd, -one, today).positive() == Money.of(usd, -one, today) assert Money.of(usd, +one, today).positive() == Money.of(usd, +one, today) ## With overload: assert +Money.NA == Money.NA assert +Money.of(usd, zero, today) == Money.of(usd, zero, today) assert +Money.of(usd, -one, today) == Money.of(usd, -one, today) assert +Money.of(usd, +one, today) == Money.of(usd, +one, today) def test_round() -> None: ## Vanilla: assert Money.NA.round(2) == Money.NA assert Money.of(usd, zero, today).round(2) == Money.of(usd, zero, today) assert Money.of(usd, -one, today).round(2) == Money.of(usd, -one, today) assert Money.of(usd, +one, today).round(2) == Money.of(usd, +one, today) ## Quick tests: assert Money.of(usd, Decimal("1.555"), today).round(2) == Money.of(usd, Decimal("1.56"), today) assert Money.of(usd, Decimal("1.545"), today).round(2) == Money.of(usd, Decimal("1.54"), today) ## With overload: assert round(Money.NA, 2) == Money.NA assert round(Money.of(usd, zero, today), 2) == Money.of(usd, zero, today) assert round(Money.of(usd, -one, today), 2) == Money.of(usd, -one, today) assert round(Money.of(usd, +one, today), 2) == Money.of(usd, +one, today) assert round(Money.of(usd, Decimal("1.555"), today), 2) == Money.of(usd, Decimal("1.56"), today) assert round(Money.of(usd, Decimal("1.545"), today), 2) == Money.of(usd, Decimal("1.54"), today) ## Extras: assert round(Money.of(usd, Decimal("0.545"), today), 0) == Money.of(usd, Decimal("1"), today) assert round(Money.of(usd, Decimal("1.545"), today), 0) == Money.of(usd, Decimal("2"), today) assert round(Money.of(usd, Decimal("0.545"), today), 1) == Money.of(usd, Decimal("0.5"), today) assert round(Money.of(usd, Decimal("1.545"), today), 1) == Money.of(usd, Decimal("1.5"), today) assert round(Money.of(usd, Decimal("0.45"), today), 1) == Money.of(usd, Decimal("0.4"), today) assert round(Money.of(usd, Decimal("1.45"), today), 1) == Money.of(usd, Decimal("1.4"), today) ## TODO: Following two are not really what round 
function signature says. mypy can't detect it! assert round(Money.of(usd, Decimal("1.4"), today)) == Money.of(usd, Decimal("1"), today) assert round(Money.of(usd, Decimal("1.5"), today)) == Money.of(usd, Decimal("2"), today) def test_addition() -> None: ## First use `Money.NA`s: assert Money.NA.add(Money.NA) == Money.NA assert Money.NA.add(Money.of(usd, zero, today)) == Money.of(usd, zero, today) assert Money.of(usd, zero, today).add(Money.NA) == Money.of(usd, zero, today) ## Vanilla addition: assert Money.of(usd, zero, today).add(Money.of(usd, zero, today)) == Money.of(usd, zero, today) assert Money.of(usd, zero, today).add(Money.of(usd, one, today)) == Money.of(usd, one, today) assert Money.of(usd, one, today).add(Money.of(usd, one, today)) == Money.of(usd, two, today) assert Money.of(usd, one, today).add(Money.of(usd, -one, today)) == Money.of(usd, zero, today) ## Carry dates forward: assert Money.of(usd, zero, today).add(Money.of(usd, one, yesterday)) == Money.of(usd, one, today) assert Money.of(usd, zero, yesterday).add(Money.of(usd, one, today)) == Money.of(usd, one, today) ## Incompatible currency errors: with pytest.raises(IncompatibleCurrencyError): Money.of(usd, zero, today).add(Money.of(eur, zero, today)) ## Operator overload: assert Money.NA + Money.NA == Money.NA assert Money.NA + Money.of(usd, zero, today) == Money.of(usd, zero, today) assert Money.of(usd, zero, today) + Money.NA == Money.of(usd, zero, today) assert Money.of(usd, zero, today) + Money.of(usd, one, today) == Money.of(usd, one, today) def test_scalar_addition() -> None: ## First use `Money.NA`s: assert Money.NA.scalar_add(1) == Money.NA ## Vanilla addition: assert Money.of(usd, zero, today).scalar_add(1) == Money.of(usd, one, today) assert Money.of(usd, zero, today).scalar_add(1.0) == Money.of(usd, one, today) assert Money.of(usd, zero, today).scalar_add(one) == Money.of(usd, one, today) assert Money.of(usd, zero, today).scalar_add(-1) == Money.of(usd, -one, today) ## Extras: assert Money.of(usd, zero, today).scalar_add(0.5) == Money.of(usd, half, today) assert Money.of(usd, zero, today).scalar_add(Decimal("0.05")) == Money.of(usd, Decimal("0.05"), today) assert Money.of(usd, zero, today).scalar_add(Decimal("0.005")) == Money.of(usd, Decimal("0"), today) assert Money.of(usd, zero, today).scalar_add(Decimal("0.015")) == Money.of(usd, Decimal("0.02"), today) def test_subtraction() -> None: ## First use `Money.NA`s: assert Money.NA.subtract(Money.NA) == Money.NA assert Money.NA.subtract(Money.of(usd, zero, today)) == Money.of(usd, zero, today) assert Money.of(usd, zero, today).subtract(Money.NA) == Money.of(usd, zero, today) ## Vanilla subtraction: assert Money.of(usd, zero, today).subtract(Money.of(usd, zero, today)) == Money.of(usd, zero, today) assert Money.of(usd, zero, today).subtract(Money.of(usd, one, today)) == Money.of(usd, -one, today) assert Money.of(usd, one, today).subtract(Money.of(usd, one, today)) == Money.of(usd, zero, today) assert Money.of(usd, one, today).subtract(Money.of(usd, -one, today)) == Money.of(usd, two, today) ## Carry dates forward: assert Money.of(usd, zero, today).subtract(Money.of(usd, one, yesterday)) == Money.of(usd, -one, today) assert Money.of(usd, zero, yesterday).subtract(Money.of(usd, one, today)) == Money.of(usd, -one, today) ## Incompatible currency errors: with pytest.raises(IncompatibleCurrencyError): Money.of(usd, zero, today).subtract(Money.of(eur, zero, today)) ## Operator overload: assert Money.of(usd, zero, today) - Money.of(usd, one, today) == Money.of(usd, -one, 
today) assert Money.NA - Money.NA == Money.NA assert Money.NA - Money.of(usd, zero, today) == Money.of(usd, zero, today) assert Money.of(usd, zero, today) - Money.NA == Money.of(usd, zero, today) def test_scalar_subtraction() -> None: ## First use `Money.NA`s: assert Money.NA.scalar_subtract(1) == Money.NA ## Vanilla subtraction: assert Money.of(usd, zero, today).scalar_subtract(1) == Money.of(usd, -one, today) assert Money.of(usd, zero, today).scalar_subtract(1.0) == Money.of(usd, -one, today) assert Money.of(usd, zero, today).scalar_subtract(one) == Money.of(usd, -one, today) assert Money.of(usd, zero, today).scalar_subtract(-1) == Money.of(usd, one, today) ## Operator overload: assert Money.of(usd, zero, today).scalar_subtract(1) == Money.of(usd, -one, today) assert Money.of(usd, zero, today).scalar_subtract(-1) == Money.of(usd, one, today) ## Extras: assert Money.of(usd, zero, today).scalar_subtract(0.5) == Money.of(usd, -half, today) assert Money.of(usd, zero, today).scalar_subtract(Decimal("0.05")) == -Money.of(usd, Decimal("0.05"), today) assert Money.of(usd, zero, today).scalar_subtract(Decimal("0.005")) == -Money.of(usd, Decimal("0"), today) assert Money.of(usd, zero, today).scalar_subtract(Decimal("0.015")) == -Money.of(usd, Decimal("0.02"), today) def test_scalar_multiplication() -> None: ## First use `Money.NA`s: assert Money.NA.multiply(1) == Money.NA ## Vanilla subtraction: assert Money.of(usd, one, today).multiply(1) == Money.of(usd, one, today) assert Money.of(usd, one, today).multiply(2) == Money.of(usd, two, today) assert Money.of(usd, -one, today).multiply(1) == Money.of(usd, -one, today) assert Money.of(usd, -one, today).multiply(2) == Money.of(usd, -two, today) ## Other types: assert Money.of(usd, one, today).multiply(1) == Money.of(usd, one, today) assert Money.of(usd, one, today).multiply(1.0) == Money.of(usd, one, today) assert Money.of(usd, one, today).multiply(one) == Money.of(usd, one, today) ## Operator overload: assert Money.NA * 1 == Money.NA assert Money.of(usd, one, today) * 1 == Money.of(usd, one, today) assert Money.of(usd, one, today) * 2 == Money.of(usd, two, today) assert Money.of(usd, -one, today) * 1 == Money.of(usd, -one, today) assert Money.of(usd, -one, today) * 2 == Money.of(usd, -two, today) ## Extras assert Money.of(usd, one, today).multiply(Decimal("0.050")) == Money.of(usd, Decimal("0.05"), today) assert Money.of(usd, one, today).multiply(Decimal("0.005")) == Money.of(usd, Decimal("0.00"), today) assert Money.of(usd, one, today).multiply(Decimal("0.015")) == Money.of(usd, Decimal("0.02"), today) def test_division() -> None: ## First use `Money.NA`s: assert Money.NA.divide(1) == Money.NA ## Vanilla subtraction: assert Money.of(usd, one, today).divide(1) == Money.of(usd, one, today) assert Money.of(usd, one, today).divide(2) == Money.of(usd, half, today) assert Money.of(usd, -one, today).divide(1) == Money.of(usd, -one, today) assert Money.of(usd, -one, today).divide(2) == Money.of(usd, -half, today) ## Various divisor types: assert Money.of(usd, one, today).divide(2) == Money.of(usd, half, today) assert Money.of(usd, one, today).divide(2.0) == Money.of(usd, half, today) assert Money.of(usd, one, today).divide(two) == Money.of(usd, half, today) ## Division by zero: assert Money.of(usd, one, today).divide(0) == Money.NA assert Money.of(usd, one, today).divide(zero) == Money.NA assert Money.of(usd, one, today).divide(0.0) == Money.NA ## Operator overload: assert Money.NA / 1 == Money.NA assert Money.of(usd, one, today) / 1 == Money.of(usd, one, 
today) assert Money.of(usd, one, today) / 2 == Money.of(usd, half, today) assert Money.of(usd, -one, today) / 1 == Money.of(usd, -one, today) assert Money.of(usd, -one, today) / 2 == Money.of(usd, -half, today) assert Money.of(usd, -one, today) / 0 == Money.NA ## Extras assert Money.of(usd, one, today).divide(Decimal("10")) == Money.of(usd, Decimal("0.10"), today) assert Money.of(usd, one, today).divide(Decimal("50")) == Money.of(usd, Decimal("0.02"), today) assert Money.of(usd, one, today).divide(Decimal("100")) == Money.of(usd, Decimal("0.01"), today) assert Money.of(usd, one, today).divide(Decimal("1000")) == Money.of(usd, Decimal("0.00"), today) def test_floor_division() -> None: ## First use `Money.NA`s: assert Money.NA.floor_divide(1) == Money.NA ## Vanilla subtraction: assert Money.of(usd, one, today).floor_divide(1) == Money.of(usd, one, today) assert Money.of(usd, one, today).floor_divide(2) == Money.of(usd, zero, today) assert Money.of(usd, -one, today).floor_divide(1) == Money.of(usd, -one, today) assert Money.of(usd, -one, today).floor_divide(2) == Money.of(usd, zero, today) ## Various divisor types: assert Money.of(usd, one, today).floor_divide(2) == Money.of(usd, zero, today) assert Money.of(usd, one, today).floor_divide(2.0) == Money.of(usd, zero, today) assert Money.of(usd, one, today).floor_divide(two) == Money.of(usd, zero, today) ## Division by zero: assert Money.of(usd, one, today).floor_divide(0) == Money.NA assert Money.of(usd, one, today).floor_divide(zero) == Money.NA assert Money.of(usd, one, today).floor_divide(0.0) == Money.NA ## Operator overload: assert Money.NA / 1 == Money.NA assert Money.of(usd, one, today) // 1 == Money.of(usd, one, today) assert Money.of(usd, one, today) // 2 == Money.of(usd, zero, today) assert Money.of(usd, -one, today) // 1 == Money.of(usd, -one, today) assert Money.of(usd, -one, today) // 2 == Money.of(usd, zero, today) assert Money.of(usd, -one, today) // 0 == Money.NA ## Extras assert Money.of(usd, Decimal("10"), today).floor_divide(Decimal("10")) == Money.of(usd, Decimal("1.00"), today) assert Money.of(usd, Decimal("10"), today).floor_divide(Decimal("11")) == Money.of(usd, Decimal("0.00"), today) def test_comparisons() -> None: ## First use `Money.NA`s: assert not (Money.NA < Money.NA) assert Money.NA <= Money.NA assert not (Money.NA > Money.NA) assert Money.NA >= Money.NA ## Try mixed: assert Money.NA < Money.of(usd, -one, today) assert Money.NA <= Money.of(usd, -one, today) assert not (Money.NA > Money.of(usd, -one, today)) assert not (Money.NA >= Money.of(usd, -one, today)) ## ... and: assert not (Money.of(usd, -one, today) < Money.NA) assert not (Money.of(usd, -one, today) <= Money.NA) assert Money.of(usd, -one, today) > Money.NA assert Money.of(usd, -one, today) >= Money.NA ## With defined values: assert not (Money.of(usd, zero, today) < Money.of(usd, zero, today)) assert Money.of(usd, zero, today) <= Money.of(usd, zero, today) assert not (Money.of(usd, zero, today) > Money.of(usd, zero, today)) assert Money.of(usd, zero, today) >= Money.of(usd, zero, today) ## ... and: assert Money.of(usd, zero, today) < Money.of(usd, one, today) assert Money.of(usd, zero, today) <= Money.of(usd, one, today) assert not (Money.of(usd, zero, today) > Money.of(usd, one, today)) assert not (Money.of(usd, zero, today) >= Money.of(usd, one, today)) ## ... 
and: assert not (Money.of(usd, one, today) < Money.of(usd, zero, today)) assert not (Money.of(usd, one, today) <= Money.of(usd, zero, today)) assert Money.of(usd, one, today) > Money.of(usd, zero, today) assert Money.of(usd, one, today) >= Money.of(usd, zero, today) def test_with() -> None: ## First use `Money.NA`s: assert Money.NA.with_ccy(usd) == Money.NA assert Money.NA.with_qty(one) == Money.NA assert Money.NA.with_dov(today) == Money.NA ## Now with some: assert Money.of(usd, zero, today).with_ccy(eur) == Money.of(eur, zero, today) assert Money.of(usd, zero, today).with_qty(one) == Money.of(usd, one, today) assert Money.of(usd, zero, today).with_dov(yesterday) == Money.of(usd, zero, yesterday) ## Extras: assert Money.of(usd, zero, today).with_qty(Decimal("0.005")) == Money.of(usd, zero, today) assert Money.of(usd, zero, today).with_qty(Decimal("0.054")) == Money.of(usd, Decimal("0.05"), today)
44.810384
115
0.644451
2b222b862f8f6a155c62d99558cc7143fd1602ca
408
py
Python
StackOverflow_RegEx.py
amukher3/Problem_solutions
8fa6014a91f295d08cafb989024caa91d99211d9
[ "Apache-2.0" ]
1
2021-12-28T08:58:51.000Z
2021-12-28T08:58:51.000Z
StackOverflow_RegEx.py
amukher3/Coding
a330cb04b5dd5cc1c3cf69249417a71586441bc7
[ "Apache-2.0" ]
null
null
null
StackOverflow_RegEx.py
amukher3/Coding
a330cb04b5dd5cc1c3cf69249417a71586441bc7
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Tue May  5 19:11:41 2020

@author: Abhishek Mukherjee

https://stackoverflow.com/questions/61624992/python-regexpt-to-match-a-word-with-several-dots
"""
import re

# NOTE: the original snippet used `str1` without defining it; the value below
# is an assumed example input so the script runs on its own.
str1 = 'c.a.t.f.i.s.h'

# Split on literal dots (raw string avoids the invalid escape-sequence
# warning), keep only the alphabetic pieces, and compare the re-joined result.
temp = re.split(r'\.', str1)
temp2 = [i for i in temp if i.isalpha()]

if ''.join(temp2) == 'catfish':
    print('Match Found')
else:
    print('Doesn\'t Match')
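For comparison, the same check can be written as a single regular expression. This sketch is an editor's addition, not part of the original snippet; the test strings are illustrative, and it behaves slightly differently from the split-and-filter logic at the edges (it only allows single dots between the letters of "catfish").

import re

# Editor's sketch (assumption): one-pattern version of the check above.
pattern = re.compile(r'^c\.?a\.?t\.?f\.?i\.?s\.?h$')
print(bool(pattern.match('c.a.t.f.i.s.h')))  # True
print(bool(pattern.match('cat.fish')))       # True
print(bool(pattern.match('dogfish')))        # False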
21.473684
94
0.612745
bb2174ede1e77273af83f24f856490466dfa08f1
3,127
py
Python
timeflux/core/worker.py
HerySon/timeflux
01a5a27a3368afe8b0c1da475f84618e11ffec3b
[ "MIT" ]
123
2019-01-09T08:57:39.000Z
2022-03-18T18:59:51.000Z
timeflux/core/worker.py
HerySon/timeflux
01a5a27a3368afe8b0c1da475f84618e11ffec3b
[ "MIT" ]
43
2019-03-08T10:16:39.000Z
2021-06-14T17:17:18.000Z
timeflux/core/worker.py
HerySon/timeflux
01a5a27a3368afe8b0c1da475f84618e11ffec3b
[ "MIT" ]
18
2019-03-26T08:51:21.000Z
2021-10-14T23:10:33.000Z
"""timeflux.core.worker: spawn processes.""" import importlib import logging import signal from multiprocessing import Process from timeflux.core.logging import get_queue, init_worker from timeflux.core.graph import Graph from timeflux.core.scheduler import Scheduler from timeflux.core.registry import Registry from timeflux.core.exceptions import * class Worker: """Spawn a process and launch a scheduler.""" def __init__(self, graph): self._graph = graph def run(self): """Run the process""" p = Process(target=self._run, args=(get_queue(),), name=self._graph["id"]) p.start() return p def load(self): # Build the graph and compute the traversal path g = Graph(self._graph) graph = g.build() path = g.traverse() # Set rate Registry.rate = self._graph["rate"] # Load nodes nodes = {} for step in path: node = self._load_node(graph.nodes[step["node"]], step["node"]) nodes[step["node"]] = node return path, nodes def _run(self, log_queue=None): # Initialize logging if log_queue: init_worker(log_queue) logger = logging.getLogger(__name__) scheduler = None try: # Initialize the graph and instantiate the nodes path, nodes = self.load() # Launch scheduler and run it scheduler = Scheduler(path, nodes, self._graph["rate"]) scheduler.run() except KeyboardInterrupt: # Ignore further interrupts signal.signal(signal.SIGINT, signal.SIG_IGN) logger.debug("Interrupting") except ( GraphDuplicateNode, GraphUndefinedNode, WorkerLoadError, ValidationError, ) as error: logger.error(error) except WorkerInterrupt as error: logger.debug(error) except Exception as error: logger.exception(error) if scheduler is not None: logger.info("Terminating") scheduler.terminate() def _load_node(self, node, nid): """Import a module and instantiate class.""" # Import module try: m = importlib.import_module(node["module"]) except ModuleNotFoundError as error: if node["module"] in error.msg: # Missing or invalid node raise WorkerLoadError( f"Node '{nid}': no module named '{node['module']}'" ) else: # Missing or invalid dependency raise error # Get class try: c = getattr(m, node["class"]) except AttributeError: raise WorkerLoadError( f"Node '{nid}': no class named '{node['class']}' in module '{node['module']}'" ) # Instantiate class try: n = c(**node["params"]) except TypeError as error: raise WorkerLoadError(f"Node '{nid}': {error}") return n
28.171171
94
0.56316
3b2acae7327584b76053a383a39c2dd837c78369
14,787
py
Python
deepchem/models/tests/test_torch_model.py
deloragaskins/deepchem
234ab699cdb997e5963966a8b6926cb2cda7c064
[ "MIT" ]
3,782
2016-02-21T03:53:11.000Z
2022-03-31T16:10:26.000Z
deepchem/models/tests/test_torch_model.py
deloragaskins/deepchem
234ab699cdb997e5963966a8b6926cb2cda7c064
[ "MIT" ]
2,666
2016-02-11T01:54:54.000Z
2022-03-31T11:14:33.000Z
deepchem/models/tests/test_torch_model.py
deloragaskins/deepchem
234ab699cdb997e5963966a8b6926cb2cda7c064
[ "MIT" ]
1,597
2016-02-21T03:10:08.000Z
2022-03-30T13:21:28.000Z
import os import pytest import deepchem as dc import numpy as np import math import unittest try: import torch import torch.nn.functional as F has_pytorch = True except: has_pytorch = False try: import wandb has_wandb = True except: has_wandb = False @pytest.mark.torch def test_overfit_subclass_model(): """Test fitting a TorchModel defined by subclassing Module.""" n_data_points = 10 n_features = 2 np.random.seed(1234) X = np.random.rand(n_data_points, n_features) y = (X[:, 0] > X[:, 1]).astype(np.float32) dataset = dc.data.NumpyDataset(X, y) class ExampleModel(torch.nn.Module): def __init__(self, layer_sizes): super(ExampleModel, self).__init__() self.layers = torch.nn.ModuleList() in_size = n_features for out_size in layer_sizes: self.layers.append(torch.nn.Linear(in_size, out_size)) in_size = out_size def forward(self, x): for i, layer in enumerate(self.layers): x = layer(x) if i < len(self.layers) - 1: x = F.relu(x) return torch.sigmoid(x), x pytorch_model = ExampleModel([10, 1]) model = dc.models.TorchModel( pytorch_model, dc.models.losses.SigmoidCrossEntropy(), output_types=['prediction', 'loss'], learning_rate=0.005) model.fit(dataset, nb_epoch=1000) prediction = np.squeeze(model.predict_on_batch(X)) assert np.array_equal(y, np.round(prediction)) metric = dc.metrics.Metric(dc.metrics.roc_auc_score) scores = model.evaluate(dataset, [metric]) assert scores[metric.name] > 0.9 @pytest.mark.torch def test_overfit_sequential_model(): """Test fitting a TorchModel defined as a sequential model.""" n_data_points = 10 n_features = 2 X = np.random.rand(n_data_points, n_features) y = (X[:, 0] > X[:, 1]).astype(np.float32) dataset = dc.data.NumpyDataset(X, y) pytorch_model = torch.nn.Sequential( torch.nn.Linear(2, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1), torch.nn.Sigmoid()) model = dc.models.TorchModel( pytorch_model, dc.models.losses.BinaryCrossEntropy(), learning_rate=0.005) model.fit(dataset, nb_epoch=1000) prediction = np.squeeze(model.predict_on_batch(X)) assert np.array_equal(y, np.round(prediction)) metric = dc.metrics.Metric(dc.metrics.roc_auc_score) generator = model.default_generator(dataset, pad_batches=False) scores = model.evaluate_generator(generator, [metric]) assert scores[metric.name] > 0.9 @pytest.mark.torch def test_fit_use_all_losses(): """Test fitting a TorchModel and getting a loss curve back.""" n_data_points = 10 n_features = 2 X = np.random.rand(n_data_points, n_features) y = (X[:, 0] > X[:, 1]).astype(np.float32) dataset = dc.data.NumpyDataset(X, y) pytorch_model = torch.nn.Sequential( torch.nn.Linear(2, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1), torch.nn.Sigmoid()) model = dc.models.TorchModel( pytorch_model, dc.models.losses.BinaryCrossEntropy(), learning_rate=0.005, log_frequency=10) losses = [] model.fit(dataset, nb_epoch=1000, all_losses=losses) # Each epoch is a single step for this model assert len(losses) == 100 assert np.count_nonzero(np.array(losses)) == 100 @pytest.mark.torch def test_fit_on_batch(): """Test fitting a TorchModel to individual batches.""" n_data_points = 10 n_features = 2 X = np.random.rand(n_data_points, n_features) y = (X[:, 0] > X[:, 1]).astype(np.float32) dataset = dc.data.NumpyDataset(X, y) pytorch_model = torch.nn.Sequential( torch.nn.Linear(2, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1), torch.nn.Sigmoid()) model = dc.models.TorchModel( pytorch_model, dc.models.losses.BinaryCrossEntropy(), learning_rate=0.005) i = 0 for X, y, w, ids in dataset.iterbatches(model.batch_size, 500): i += 1 model.fit_on_batch(X, y, w, 
checkpoint=False) prediction = np.squeeze(model.predict_on_batch(X)) assert np.array_equal(y, np.round(prediction)) metric = dc.metrics.Metric(dc.metrics.roc_auc_score) generator = model.default_generator(dataset, pad_batches=False) scores = model.evaluate_generator(generator, [metric]) assert scores[metric.name] > 0.9 @pytest.mark.torch def test_checkpointing(): """Test loading and saving checkpoints with TorchModel.""" # Create two models using the same model directory. pytorch_model1 = torch.nn.Sequential(torch.nn.Linear(5, 10)) pytorch_model2 = torch.nn.Sequential(torch.nn.Linear(5, 10)) model1 = dc.models.TorchModel(pytorch_model1, dc.models.losses.L2Loss()) model2 = dc.models.TorchModel( pytorch_model2, dc.models.losses.L2Loss(), model_dir=model1.model_dir) # Check that they produce different results. X = np.random.rand(5, 5) y1 = model1.predict_on_batch(X) y2 = model2.predict_on_batch(X) assert not np.array_equal(y1, y2) # Save a checkpoint from the first model and load it into the second one, # and make sure they now match. model1.save_checkpoint() model2.restore() y3 = model1.predict_on_batch(X) y4 = model2.predict_on_batch(X) assert np.array_equal(y1, y3) assert np.array_equal(y1, y4) @pytest.mark.torch def test_fit_restore(): """Test specifying restore=True when calling fit().""" n_data_points = 10 n_features = 2 X = np.random.rand(n_data_points, n_features) y = (X[:, 0] > X[:, 1]).astype(np.float32) dataset = dc.data.NumpyDataset(X, y) # Train a model to overfit the dataset. pytorch_model = torch.nn.Sequential( torch.nn.Linear(2, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1), torch.nn.Sigmoid()) model = dc.models.TorchModel( pytorch_model, dc.models.losses.BinaryCrossEntropy(), learning_rate=0.005) model.fit(dataset, nb_epoch=1000) prediction = np.squeeze(model.predict_on_batch(X)) assert np.array_equal(y, np.round(prediction)) # Create an identical model, do a single step of fitting with restore=True, # and make sure it got restored correctly. pytorch_model2 = torch.nn.Sequential( torch.nn.Linear(2, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1), torch.nn.Sigmoid()) model2 = dc.models.TorchModel( pytorch_model2, dc.models.losses.BinaryCrossEntropy(), model_dir=model.model_dir) model2.fit(dataset, nb_epoch=1, restore=True) prediction = np.squeeze(model2.predict_on_batch(X)) assert np.array_equal(y, np.round(prediction)) @pytest.mark.torch def test_uncertainty(): """Test estimating uncertainty a TorchModel.""" n_samples = 30 n_features = 1 noise = 0.1 X = np.random.rand(n_samples, n_features) y = (10 * X + np.random.normal(scale=noise, size=(n_samples, n_features))) dataset = dc.data.NumpyDataset(X, y) # Build a model that predicts uncertainty. 
class PyTorchUncertainty(torch.nn.Module): def __init__(self): super(PyTorchUncertainty, self).__init__() self.hidden = torch.nn.Linear(n_features, 200) self.output = torch.nn.Linear(200, n_features) self.log_var = torch.nn.Linear(200, n_features) def forward(self, inputs): x, use_dropout = inputs x = self.hidden(x) if use_dropout: x = F.dropout(x, 0.1) output = self.output(x) log_var = self.log_var(x) var = torch.exp(log_var) return (output, var, output, log_var) def loss(outputs, labels, weights): diff = labels[0] - outputs[0] log_var = outputs[1] var = torch.exp(log_var) return torch.mean(diff * diff / var + log_var) class UncertaintyModel(dc.models.TorchModel): def default_generator(self, dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True): for epoch in range(epochs): for (X_b, y_b, w_b, ids_b) in dataset.iterbatches( batch_size=self.batch_size, deterministic=deterministic, pad_batches=pad_batches): if mode == 'predict': dropout = np.array(False) else: dropout = np.array(True) yield ([X_b, dropout], [y_b], [w_b]) pytorch_model = PyTorchUncertainty() model = UncertaintyModel( pytorch_model, loss, output_types=['prediction', 'variance', 'loss', 'loss'], learning_rate=0.003) # Fit the model and see if its predictions are correct. model.fit(dataset, nb_epoch=2500) pred, std = model.predict_uncertainty(dataset) assert np.mean(np.abs(y - pred)) < 1.0 assert noise < np.mean(std) < 1.0 @pytest.mark.torch def test_saliency_mapping(): """Test computing a saliency map.""" n_tasks = 3 n_features = 5 pytorch_model = torch.nn.Sequential( torch.nn.Linear(n_features, 20), torch.nn.Tanh(), torch.nn.Linear(20, n_tasks)) model = dc.models.TorchModel(pytorch_model, dc.models.losses.L2Loss()) x = np.random.random(n_features) s = model.compute_saliency(x) assert s.shape[0] == n_tasks assert s.shape[1] == n_features # Take a tiny step in the direction of s and see if the output changes by # the expected amount. 
delta = 0.01 for task in range(n_tasks): norm = np.sqrt(np.sum(s[task]**2)) step = 0.5 * delta / norm pred1 = model.predict_on_batch((x + s[task] * step).reshape( (1, n_features))).flatten() pred2 = model.predict_on_batch((x - s[task] * step).reshape( (1, n_features))).flatten() assert np.allclose(pred1[task], (pred2 + norm * delta)[task], atol=1e-6) @pytest.mark.torch def test_saliency_shapes(): """Test computing saliency maps for multiple outputs with multiple dimensions.""" class SaliencyModel(torch.nn.Module): def __init__(self): super(SaliencyModel, self).__init__() self.layer1 = torch.nn.Linear(6, 4) self.layer2 = torch.nn.Linear(6, 5) def forward(self, x): x = torch.flatten(x) output1 = self.layer1(x).reshape(1, 4, 1) output2 = self.layer2(x).reshape(1, 1, 5) return output1, output2 pytorch_model = SaliencyModel() model = dc.models.TorchModel(pytorch_model, dc.models.losses.L2Loss()) x = np.random.random((2, 3)) s = model.compute_saliency(x) assert len(s) == 2 assert s[0].shape == (4, 1, 2, 3) assert s[1].shape == (1, 5, 2, 3) @pytest.mark.torch def test_tensorboard(): """Test logging to Tensorboard.""" n_data_points = 20 n_features = 2 X = np.random.rand(n_data_points, n_features) y = [[0.0, 1.0] for x in range(n_data_points)] dataset = dc.data.NumpyDataset(X, y) pytorch_model = torch.nn.Sequential( torch.nn.Linear(n_features, 2), torch.nn.Softmax(dim=1)) model = dc.models.TorchModel( pytorch_model, dc.models.losses.CategoricalCrossEntropy(), tensorboard=True, log_frequency=1) model.fit(dataset, nb_epoch=10) files_in_dir = os.listdir(model.model_dir) event_file = list(filter(lambda x: x.startswith("events"), files_in_dir)) assert len(event_file) > 0 event_file = os.path.join(model.model_dir, event_file[0]) file_size = os.stat(event_file).st_size assert file_size > 0 @pytest.mark.torch @unittest.skipIf((not has_pytorch) or (not has_wandb), 'PyTorch and/or Wandb is not installed') def test_wandblogger(): """Test logging to Weights & Biases.""" # Load dataset and Models tasks, datasets, transformers = dc.molnet.load_delaney( featurizer='ECFP', splitter='random') train_dataset, valid_dataset, test_dataset = datasets metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) wandblogger = dc.models.WandbLogger(anonymous="allow", save_run_history=True) pytorch_model = torch.nn.Sequential( torch.nn.Linear(1024, 1000), torch.nn.Dropout(p=0.5), torch.nn.Linear(1000, 1)) model = dc.models.TorchModel( pytorch_model, dc.models.losses.L2Loss(), wandb_logger=wandblogger) vc_train = dc.models.ValidationCallback(train_dataset, 1, [metric]) vc_valid = dc.models.ValidationCallback(valid_dataset, 1, [metric]) model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid]) # call model.fit again to test multiple fit() calls model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid]) wandblogger.finish() run_data = wandblogger.run_history valid_score = model.evaluate(valid_dataset, [metric], transformers) assert math.isclose( valid_score["pearson_r2_score"], run_data['eval/pearson_r2_score_(1)'], abs_tol=0.0005) @pytest.mark.torch def test_fit_variables(): """Test training a subset of the variables in a model.""" class VarModel(torch.nn.Module): def __init__(self, **kwargs): super(VarModel, self).__init__(**kwargs) self.var1 = torch.nn.Parameter(torch.Tensor([0.5])) self.var2 = torch.nn.Parameter(torch.Tensor([0.5])) def forward(self, inputs): return [self.var1, self.var2] def loss(outputs, labels, weights): return (outputs[0] * outputs[1] - labels[0])**2 pytorch_model = VarModel() model = 
dc.models.TorchModel(pytorch_model, loss, learning_rate=0.02) x = np.ones((1, 1)) vars = model.predict_on_batch(x) assert np.allclose(vars[0], 0.5) assert np.allclose(vars[1], 0.5) model.fit_generator([(x, x, x)] * 300) vars = model.predict_on_batch(x) assert np.allclose(vars[0], 1.0) assert np.allclose(vars[1], 1.0) model.fit_generator([(x, 2 * x, x)] * 300, variables=[pytorch_model.var1]) vars = model.predict_on_batch(x) assert np.allclose(vars[0], 2.0) assert np.allclose(vars[1], 1.0) model.fit_generator([(x, x, x)] * 300, variables=[pytorch_model.var2]) vars = model.predict_on_batch(x) assert np.allclose(vars[0], 2.0) assert np.allclose(vars[1], 0.5) @pytest.mark.torch def test_fit_loss(): """Test specifying a different loss function when calling fit().""" class VarModel(torch.nn.Module): def __init__(self): super(VarModel, self).__init__() self.var1 = torch.nn.Parameter(torch.Tensor([0.5])) self.var2 = torch.nn.Parameter(torch.Tensor([0.5])) def forward(self, inputs): return [self.var1, self.var2] def loss1(outputs, labels, weights): return (outputs[0] * outputs[1] - labels[0])**2 def loss2(outputs, labels, weights): return (outputs[0] + outputs[1] - labels[0])**2 pytorch_model = VarModel() model = dc.models.TorchModel(pytorch_model, loss1, learning_rate=0.01) x = np.ones((1, 1)) vars = model.predict_on_batch(x) assert np.allclose(vars[0], 0.5) assert np.allclose(vars[1], 0.5) model.fit_generator([(x, x, x)] * 300) vars = model.predict_on_batch(x) assert np.allclose(vars[0], 1.0) assert np.allclose(vars[1], 1.0) model.fit_generator([(x, 3 * x, x)] * 300, loss=loss2) vars = model.predict_on_batch(x) assert np.allclose(vars[0] + vars[1], 3.0)
32.933185
83
0.679651
c285f14dd3633d7ae6e34adc48efd0dd3e8c7433
2,098
py
Python
src/py_dss_interface/models/Loads/LoadsV.py
davilamds/py_dss_interface
a447c97787aeac962381db88dd622ccb235eef4b
[ "MIT" ]
null
null
null
src/py_dss_interface/models/Loads/LoadsV.py
davilamds/py_dss_interface
a447c97787aeac962381db88dd622ccb235eef4b
[ "MIT" ]
null
null
null
src/py_dss_interface/models/Loads/LoadsV.py
davilamds/py_dss_interface
a447c97787aeac962381db88dd622ccb235eef4b
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*-
"""
 Created by eniocc at 11/10/2020
"""
from py_dss_interface.models.Text.Text import Text
from py_dss_interface.models import Bridge
from py_dss_interface.models.Base import Base
from py_dss_interface.models.Loads.LoadsS import LoadsS


class LoadsV(Base):
    """
    This interface can be used to read/modify the properties of the Loads Class where the values are variants (the
    value can have different formats).

    The structure of the interface is as follows:
        void DSSLoadsV(int32_t Parameter, VARIANT *Argument);

    This interface returns a string, the variable “parameter” (Integer) is used to specify the property of the class
    to be used and the variable “argument” (Variant) is used to return the variant structure.
    """

    def loads_all_names(self):
        """Allows to read the names of all the loads present in the active circuit. The result is delivered as
        variant, however, the content of this variant is an array of strings."""
        return Bridge.var_array_function(self.dss_obj.DSSLoadsV, 0, None, '')

    def loads_read_zipv(self):
        """Allows to read the array of 7 elements (doubles) for ZIP property of the active Load object."""
        return Bridge.var_array_function(self.dss_obj.DSSLoadsV, 1, None, '')

    def loads_write_zipv(self, argument):
        """Allows to write the array of 7 elements (doubles) for ZIP property of the active Load object.

        :param argument: Array of 7 coefficients:
            First 3 are ZIP weighting factors for real power (should sum to 1)
            Next 3 are ZIP weighting factors for reactive power (should sum to 1)
            Last 1 is cut-off voltage in p.u. of base kV; load is 0 below this cut-off
            No defaults; all coefficients must be specified if using model=8.
        """
        argument = Base.check_string_param(argument)
        t = Text(self.dss_obj)
        load = LoadsS(self.dss_obj)
        load_name = load.loads_read_name()
        return t.text(f'edit Load.{load_name} zipv = {argument}')
45.608696
116
0.686368
0f21682116969c50b8c846f41baf71a86ae52533
2,571
py
Python
fleet-ctr/infer.py
wangjiawei04/ElasticRec
2c6414bbfb6e6c2139552f9c7f47731788690766
[ "Apache-2.0" ]
132
2019-12-03T06:48:48.000Z
2022-03-04T17:31:13.000Z
fleet-ctr/infer.py
wangjiawei04/ElasticRec
2c6414bbfb6e6c2139552f9c7f47731788690766
[ "Apache-2.0" ]
3
2019-12-17T03:13:56.000Z
2021-01-12T17:04:46.000Z
fleet-ctr/infer.py
wangjiawei04/ElasticRec
2c6414bbfb6e6c2139552f9c7f47731788690766
[ "Apache-2.0" ]
35
2019-12-04T03:07:55.000Z
2022-03-28T19:27:50.000Z
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import os
import paddle.fluid as fluid
import numpy as np
from criteo_pyreader import CriteoDataset
import paddle
from nets import ctr_dnn_model

feature_names = []
with open(sys.argv[1]) as fin:
    for line in fin:
        feature_names.append(line.strip())
print(feature_names)

sparse_feature_dim = 400000001
embedding_size = 9

sparse_input_ids = [
    fluid.layers.data(name=name, shape=[1], lod_level=1, dtype='int64')
    for name in feature_names
]
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
_words = sparse_input_ids + [label]

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

input_folder = "../data/infer_data"
files = os.listdir(input_folder)
infer_filelist = ["{}/{}".format(input_folder, f) for f in files]
print(infer_filelist)

criteo_dataset = CriteoDataset(feature_names)

startup_program = fluid.framework.default_main_program()
test_program = fluid.framework.default_main_program()
test_reader = paddle.batch(criteo_dataset.test(infer_filelist), 1000)

_, auc_var, _ = ctr_dnn_model(embedding_size, sparse_input_ids, label,
                              sparse_feature_dim)

[inference_program, feed_target_names, fetch_targets] = \
    fluid.io.load_inference_model(dirname="./saved_models/", executor=exe)

with open('infer_programdesc', 'w+') as f:
    f.write(inference_program.to_string(True))


def set_zero(var_name):
    param = fluid.global_scope().var(var_name).get_tensor()
    param_array = np.zeros(param._get_dims()).astype("int64")
    param.set(param_array, fluid.CPUPlace())


auc_states_names = ['_generated_var_2', '_generated_var_3']
for name in auc_states_names:
    set_zero(name)

inputs = _words
feeder = fluid.DataFeeder(feed_list=inputs, place=fluid.CPUPlace())

for batch_id, data in enumerate(test_reader()):
    auc_val = exe.run(inference_program,
                      feed=feeder.feed(data),
                      fetch_list=fetch_targets)
    print(auc_val)
35.708333
125
0.748736
f4bacd26f0da8cde510362cbb06e778f730c421b
374
py
Python
kafka-diag/k-swarm.py
egustafson/toolbox
9152c06a32bc777241c7e2c85065dcfe4e5069c7
[ "Apache-2.0" ]
null
null
null
kafka-diag/k-swarm.py
egustafson/toolbox
9152c06a32bc777241c7e2c85065dcfe4e5069c7
[ "Apache-2.0" ]
null
null
null
kafka-diag/k-swarm.py
egustafson/toolbox
9152c06a32bc777241c7e2c85065dcfe4e5069c7
[ "Apache-2.0" ]
null
null
null
""" Usage k-swarm.py <hostname> """ from kafka import SimpleProducer, KafkaClient import logging import sys logging.basicConfig() kafka = KafkaClient(sys.argv[1] + ':9092') kafka.ensure_topic_exists(b'my-topic') producer = SimpleProducer(kafka) for ii in range(100000): msg = "msg-{}".format(ii) producer.send_messages(b'my-topic', msg) print("done.") ## end
16.26087
45
0.705882
605bf097750019bbe77779a01bfd8f9dbc4201ee
781
py
Python
src/114.py
zhaoyi3264/leetcode-solutions
1a3a2d441cdd07a17e80b0ea43b7b266844f530c
[ "MIT" ]
null
null
null
src/114.py
zhaoyi3264/leetcode-solutions
1a3a2d441cdd07a17e80b0ea43b7b266844f530c
[ "MIT" ]
null
null
null
src/114.py
zhaoyi3264/leetcode-solutions
1a3a2d441cdd07a17e80b0ea43b7b266844f530c
[ "MIT" ]
null
null
null
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def flatten(self, root: TreeNode) -> None:
        """
        Do not return anything, modify root in-place instead.
        """
        if not root:
            return None
        cur = dummy = TreeNode()
        stack = [root]
        while stack:
            node = stack.pop(-1)
            if node.right:
                stack.append(node.right)
                node.right = None
            if node.left:
                stack.append(node.left)
                node.left = None
            cur.right = node
            cur = cur.right
        return dummy.right
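A self-contained usage sketch for the solution above. The TreeNode class mirrors the commented stub in the file (on LeetCode the judge provides it, so locally it must be defined before Solution); the sample tree is the editor's example, not part of the submitted solution.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Editor's example tree: 1 -> (2 -> (3, 4), 5 -> (None, 6))
root = TreeNode(1, TreeNode(2, TreeNode(3), TreeNode(4)), TreeNode(5, None, TreeNode(6)))
Solution().flatten(root)

flat, node = [], root
while node:
    flat.append(node.val)
    node = node.right
print(flat)  # preorder order after flattening: [1, 2, 3, 4, 5, 6]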
28.925926
61
0.49936
fd8978289bef7c89712a7f1e4d9fae8e68ac45f6
2,492
py
Python
homework-4-s21-malwake-git/problem2.py
malwake-git/ECE20875
2348f638088359af962bc0d98e965c1ec0132686
[ "Apache-2.0" ]
null
null
null
homework-4-s21-malwake-git/problem2.py
malwake-git/ECE20875
2348f638088359af962bc0d98e965c1ec0132686
[ "Apache-2.0" ]
null
null
null
homework-4-s21-malwake-git/problem2.py
malwake-git/ECE20875
2348f638088359af962bc0d98e965c1ec0132686
[ "Apache-2.0" ]
null
null
null
def stencil(data, f, width):
    """
    perform a stencil using the filter f with width w on list data

    output the resulting list

    note that if len(data) = k, len(output) = k - width + 1

    f will accept as input a list of size width and return a single number

    :param data: list
    :param f: function
    :param width: int
    :return: list
    """
    # Fill in
    out_list = []
    in_list = []
    i = 0
    for d in data:
        in_list.append(d)
    while i < (len(data) - width + 1):
        new_list = []
        j = 0
        while j < width <= len(in_list):
            new_list.append(in_list[j])
            j = j + 1
        del in_list[0]
        f_value = f(new_list)
        i = i + 1
        out_list.append(f_value)
    return out_list


def createBox(box):
    """
    create a box filter from the input list "box"

    this filter should accept a list of length len(box) and return a simple
    convolution of it.

    the meaning of this box filter is as follows:
    for each element of the input list l, multiply l[i] by box[i],
    sum the results of all of these multiplications, and
    return the sum

    So for a box of length 3, filter(l) should return:
    (box[0] * l[0] + box[1] * l[1] + box[2] * l[2])

    The function createBox returns the box filter itself, as well as the
    length of the filter (which can be passed as an argument to conv)

    :param box: list
    :return: function, int
    """
    # Fill in
    def boxFilter(l):
        # Fill in
        box_sum = 0
        for index in range(0, len(box)):
            box_sum = box_sum + box[index] * l[index]
        return box_sum

    return boxFilter, len(box)


if __name__ == '__main__':
    def movAvg(l):
        if len(l) != 3:
            print(len(l))
            print("Calling movAvg with the wrong length list")
            exit(1)
        return float(sum(l)) / 3

    def sumSq(l):
        if len(l) != 5:
            print("Calling sumSq with the wrong length list")
            exit(1)
        return sum([i ** 2 for i in l])

    data = [2, 5, -10, -7, -7, -3, -1, 9, 8, -6]

    print(stencil(data, movAvg, 3))
    print(stencil(data, sumSq, 5))

    # note that this creates a moving average!
    boxF1, width1 = createBox([1.0 / 3, 1.0 / 3, 1.0 / 3])
    print(stencil(data, boxF1, width1))

    boxF2, width2 = createBox([-0.5, 0, 0, 0.5])
    print(stencil(data, boxF2, width2))
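The sliding-window loop in stencil can also be expressed with list slices. This one-liner is an editor's alternative, not part of the assignment solution; for the demo data above it produces the same output.

# Editor's alternative (same behavior as stencil above, using slices):
def stencil_slices(data, f, width):
    return [f(data[i:i + width]) for i in range(len(data) - width + 1)]

data = [2, 5, -10, -7, -7, -3, -1, 9, 8, -6]
print(stencil_slices(data, lambda l: float(sum(l)) / 3, 3))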
24.673267
79
0.550963
942556417253a40249d1f21685350a430f09ce3c
3,268
py
Python
regfile/examples/chiprgfExample/sources/rgf1_defs.py
vhnatyk/vlsistuff
0981097bd19a0c482728dcc5048a3615ac9a9a90
[ "MIT" ]
1
2021-04-23T04:08:58.000Z
2021-04-23T04:08:58.000Z
regfile/examples/chiprgfExample/sources/rgf1_defs.py
psumesh/vlsistuff
1fe64b093d0581d99c7d826b74c31b8655fa0b31
[ "MIT" ]
null
null
null
regfile/examples/chiprgfExample/sources/rgf1_defs.py
psumesh/vlsistuff
1fe64b093d0581d99c7d826b74c31b8655fa0b31
[ "MIT" ]
null
null
null
ADDR_MAP = {} WIDTH_MAP = {} ronly = 0x10000 ADDR_MAP["ronly"] = 0x10000 WIDTH_MAP["ronly"] = 4 ronly2 = 0x10004 ADDR_MAP["ronly2"] = 0x10004 WIDTH_MAP["ronly2"] = 4 wonly = 0x10008 ADDR_MAP["wonly"] = 0x10008 WIDTH_MAP["wonly"] = 4 one = 0x1000c ADDR_MAP["one"] = 0x1000c WIDTH_MAP["one"] = 4 rega = 0x10010 ADDR_MAP["rega"] = 0x10010 WIDTH_MAP["rega"] = 4 control0 = 0x10014 ADDR_MAP["control0"] = 0x10014 WIDTH_MAP["control0"] = 4 statusa = 0x10018 ADDR_MAP["statusa"] = 0x10018 WIDTH_MAP["statusa"] = 4 regb = 0x1001c ADDR_MAP["regb"] = 0x1001c WIDTH_MAP["regb"] = 4 w1cc = 0x10020 ADDR_MAP["w1cc"] = 0x10020 WIDTH_MAP["w1cc"] = 4 badfwr = 0x10024 ADDR_MAP["badfwr"] = 0x10024 WIDTH_MAP["badfwr"] = 4 badfro = 0x10028 ADDR_MAP["badfro"] = 0x10028 WIDTH_MAP["badfro"] = 4 ramx = 0x10100 ADDR_MAP["ramx"] = 0x10100 WIDTH_MAP["ramx"] = 1024 rega = 0x20000 ADDR_MAP["rega"] = 0x20000 WIDTH_MAP["rega"] = 4 control0 = 0x20004 ADDR_MAP["control0"] = 0x20004 WIDTH_MAP["control0"] = 4 statusa = 0x20008 ADDR_MAP["statusa"] = 0x20008 WIDTH_MAP["statusa"] = 4 regb = 0x2000c ADDR_MAP["regb"] = 0x2000c WIDTH_MAP["regb"] = 4 extern = 0x20010 ADDR_MAP["extern"] = 0x20010 WIDTH_MAP["extern"] = 4 eth0tmp0 = 0x20100 ADDR_MAP["eth0tmp0"] = 0x20100 WIDTH_MAP["eth0tmp0"] = 4 eth0tmp1 = 0x20104 ADDR_MAP["eth0tmp1"] = 0x20104 WIDTH_MAP["eth0tmp1"] = 4 eth0tmp2 = 0x20108 ADDR_MAP["eth0tmp2"] = 0x20108 WIDTH_MAP["eth0tmp2"] = 4 eth1tmp0 = 0x20200 ADDR_MAP["eth1tmp0"] = 0x20200 WIDTH_MAP["eth1tmp0"] = 4 eth1tmp1 = 0x20204 ADDR_MAP["eth1tmp1"] = 0x20204 WIDTH_MAP["eth1tmp1"] = 4 eth1tmp2 = 0x20208 ADDR_MAP["eth1tmp2"] = 0x20208 WIDTH_MAP["eth1tmp2"] = 4 eth2tmp0 = 0x20300 ADDR_MAP["eth2tmp0"] = 0x20300 WIDTH_MAP["eth2tmp0"] = 4 eth2tmp1 = 0x20304 ADDR_MAP["eth2tmp1"] = 0x20304 WIDTH_MAP["eth2tmp1"] = 4 eth2tmp2 = 0x20308 ADDR_MAP["eth2tmp2"] = 0x20308 WIDTH_MAP["eth2tmp2"] = 4 eth3tmp0 = 0x20400 ADDR_MAP["eth3tmp0"] = 0x20400 WIDTH_MAP["eth3tmp0"] = 4 eth3tmp1 = 0x20404 ADDR_MAP["eth3tmp1"] = 0x20404 WIDTH_MAP["eth3tmp1"] = 4 eth3tmp2 = 0x20408 ADDR_MAP["eth3tmp2"] = 0x20408 WIDTH_MAP["eth3tmp2"] = 4 wider = 0x2040c ADDR_MAP["wider"] = 0x2040c WIDTH_MAP["wider"] = 16 longer = 0x2041c ADDR_MAP["longer"] = 0x2041c WIDTH_MAP["longer"] = 16 ronly = 0x2042c ADDR_MAP["ronly"] = 0x2042c WIDTH_MAP["ronly"] = 4 ronly2 = 0x20430 ADDR_MAP["ronly2"] = 0x20430 WIDTH_MAP["ronly2"] = 4 ldst_ram = 0x20800 ADDR_MAP["ldst_ram"] = 0x20800 WIDTH_MAP["ldst_ram"] = 512 ronly = 0x30000 ADDR_MAP["ronly"] = 0x30000 WIDTH_MAP["ronly"] = 4 ronly2 = 0x30004 ADDR_MAP["ronly2"] = 0x30004 WIDTH_MAP["ronly2"] = 4 wonly = 0x30008 ADDR_MAP["wonly"] = 0x30008 WIDTH_MAP["wonly"] = 4 one = 0x3000c ADDR_MAP["one"] = 0x3000c WIDTH_MAP["one"] = 4 rega = 0x30010 ADDR_MAP["rega"] = 0x30010 WIDTH_MAP["rega"] = 4 control0 = 0x30014 ADDR_MAP["control0"] = 0x30014 WIDTH_MAP["control0"] = 4 statusa = 0x30018 ADDR_MAP["statusa"] = 0x30018 WIDTH_MAP["statusa"] = 4 regb = 0x3001c ADDR_MAP["regb"] = 0x3001c WIDTH_MAP["regb"] = 4 w1cc = 0x30020 ADDR_MAP["w1cc"] = 0x30020 WIDTH_MAP["w1cc"] = 4 badfwr = 0x30024 ADDR_MAP["badfwr"] = 0x30024 WIDTH_MAP["badfwr"] = 4 badfro = 0x30028 ADDR_MAP["badfro"] = 0x30028 WIDTH_MAP["badfro"] = 4 ramx = 0x30100 ADDR_MAP["ramx"] = 0x30100 WIDTH_MAP["ramx"] = 1024
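The generated address/width tables above are normally consumed by small lookup helpers. The function below is an editor's sketch (its name and the example register are assumptions, not part of the generated file) showing how a register name resolves to its address and byte width.

# Editor's sketch: pair the two tables defined above for a single register.
def reg_info(name):
    return ADDR_MAP[name], WIDTH_MAP[name]

addr, width = reg_info("eth1tmp2")
print(hex(addr), width)  # 0x20208 4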
23.177305
30
0.71481
450942bf49f0dc6e76ff149c28cbd16648dc50d3
12,499
py
Python
sdk/python/pulumi_azure_native/web/web_app_site_extension_slot.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/web/web_app_site_extension_slot.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/web/web_app_site_extension_slot.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs __all__ = ['WebAppSiteExtensionSlot'] class WebAppSiteExtensionSlot(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, site_extension_id: Optional[pulumi.Input[str]] = None, slot: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Site Extension Information. API Version: 2020-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: Site name. :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs. :param pulumi.Input[str] site_extension_id: Site extension name. :param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API uses the production slot. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if name is None and not opts.urn: raise TypeError("Missing required property 'name'") __props__['name'] = name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['site_extension_id'] = site_extension_id if slot is None and not opts.urn: raise TypeError("Missing required property 'slot'") __props__['slot'] = slot __props__['authors'] = None __props__['comment'] = None __props__['description'] = None __props__['download_count'] = None __props__['extension_id'] = None __props__['extension_type'] = None __props__['extension_url'] = None __props__['feed_url'] = None __props__['icon_url'] = None __props__['installed_date_time'] = None __props__['installer_command_line_params'] = None __props__['kind'] = None __props__['license_url'] = None __props__['local_is_latest_version'] = None __props__['local_path'] = None __props__['project_url'] = None __props__['provisioning_state'] = None __props__['published_date_time'] = None __props__['summary'] = None __props__['system_data'] = None __props__['title'] = None __props__['type'] = None __props__['version'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/latest:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/latest:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppSiteExtensionSlot"), 
pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppSiteExtensionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppSiteExtensionSlot")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(WebAppSiteExtensionSlot, __self__).__init__( 'azure-native:web:WebAppSiteExtensionSlot', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppSiteExtensionSlot': """ Get an existing WebAppSiteExtensionSlot resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["authors"] = None __props__["comment"] = None __props__["description"] = None __props__["download_count"] = None __props__["extension_id"] = None __props__["extension_type"] = None __props__["extension_url"] = None __props__["feed_url"] = None __props__["icon_url"] = None __props__["installed_date_time"] = None __props__["installer_command_line_params"] = None __props__["kind"] = None __props__["license_url"] = None __props__["local_is_latest_version"] = None __props__["local_path"] = None __props__["name"] = None __props__["project_url"] = None __props__["provisioning_state"] = None __props__["published_date_time"] = None __props__["summary"] = None __props__["system_data"] = None __props__["title"] = None __props__["type"] = None __props__["version"] = None return WebAppSiteExtensionSlot(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def authors(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of authors. """ return pulumi.get(self, "authors") @property @pulumi.getter def comment(self) -> pulumi.Output[Optional[str]]: """ Site Extension comment. """ return pulumi.get(self, "comment") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ Detailed description. """ return pulumi.get(self, "description") @property @pulumi.getter(name="downloadCount") def download_count(self) -> pulumi.Output[Optional[int]]: """ Count of downloads. """ return pulumi.get(self, "download_count") @property @pulumi.getter(name="extensionId") def extension_id(self) -> pulumi.Output[Optional[str]]: """ Site extension ID. """ return pulumi.get(self, "extension_id") @property @pulumi.getter(name="extensionType") def extension_type(self) -> pulumi.Output[Optional[str]]: """ Site extension type. 
""" return pulumi.get(self, "extension_type") @property @pulumi.getter(name="extensionUrl") def extension_url(self) -> pulumi.Output[Optional[str]]: """ Extension URL. """ return pulumi.get(self, "extension_url") @property @pulumi.getter(name="feedUrl") def feed_url(self) -> pulumi.Output[Optional[str]]: """ Feed URL. """ return pulumi.get(self, "feed_url") @property @pulumi.getter(name="iconUrl") def icon_url(self) -> pulumi.Output[Optional[str]]: """ Icon URL. """ return pulumi.get(self, "icon_url") @property @pulumi.getter(name="installedDateTime") def installed_date_time(self) -> pulumi.Output[Optional[str]]: """ Installed timestamp. """ return pulumi.get(self, "installed_date_time") @property @pulumi.getter(name="installerCommandLineParams") def installer_command_line_params(self) -> pulumi.Output[Optional[str]]: """ Installer command line parameters. """ return pulumi.get(self, "installer_command_line_params") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: """ Kind of resource. """ return pulumi.get(self, "kind") @property @pulumi.getter(name="licenseUrl") def license_url(self) -> pulumi.Output[Optional[str]]: """ License URL. """ return pulumi.get(self, "license_url") @property @pulumi.getter(name="localIsLatestVersion") def local_is_latest_version(self) -> pulumi.Output[Optional[bool]]: """ <code>true</code> if the local version is the latest version; <code>false</code> otherwise. """ return pulumi.get(self, "local_is_latest_version") @property @pulumi.getter(name="localPath") def local_path(self) -> pulumi.Output[Optional[str]]: """ Local path. """ return pulumi.get(self, "local_path") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource Name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="projectUrl") def project_url(self) -> pulumi.Output[Optional[str]]: """ Project URL. """ return pulumi.get(self, "project_url") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[Optional[str]]: """ Provisioning state. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publishedDateTime") def published_date_time(self) -> pulumi.Output[Optional[str]]: """ Published timestamp. """ return pulumi.get(self, "published_date_time") @property @pulumi.getter def summary(self) -> pulumi.Output[Optional[str]]: """ Summary description. """ return pulumi.get(self, "summary") @property @pulumi.getter(name="systemData") def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']: """ The system metadata relating to this resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def title(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "title") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter def version(self) -> pulumi.Output[Optional[str]]: """ Version information. """ return pulumi.get(self, "version") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
37.64759
1,304
0.63037
922d659d333e9814127ac4103db551d466525c0a
172
py
Python
07_lecture_CICD/prime.py
MoStgt/CS50
62bd6eb38bea745c6356e1a8f03adb6ab70e2a37
[ "MIT" ]
null
null
null
07_lecture_CICD/prime.py
MoStgt/CS50
62bd6eb38bea745c6356e1a8f03adb6ab70e2a37
[ "MIT" ]
null
null
null
07_lecture_CICD/prime.py
MoStgt/CS50
62bd6eb38bea745c6356e1a8f03adb6ab70e2a37
[ "MIT" ]
null
null
null
import math


def is_prime(n):
    if n<2:
        return False
    for i in range(2, int(math.sqrt(n))+1):
        if n % i == 0:
            return False
    return True
15.636364
43
0.511628
8d1eaddd7d79458e96a5a2805d903d3e428b0419
1,065
py
Python
cmsplugin_cascade/icon/forms.py
aDENTinTIME/djangocms-cascade
c38c1c5ad052dbe233b50fb833ad8e9a919014f2
[ "MIT" ]
null
null
null
cmsplugin_cascade/icon/forms.py
aDENTinTIME/djangocms-cascade
c38c1c5ad052dbe233b50fb833ad8e9a919014f2
[ "MIT" ]
null
null
null
cmsplugin_cascade/icon/forms.py
aDENTinTIME/djangocms-cascade
c38c1c5ad052dbe233b50fb833ad8e9a919014f2
[ "MIT" ]
null
null
null
from django.forms import widgets, CharField, ModelChoiceField
from django.utils.translation import ugettext_lazy as _

from cmsplugin_cascade.models import IconFont
from entangled.forms import EntangledModelFormMixin


def get_default_icon_font():
    try:
        return IconFont.objects.get(is_default=True).id
    except IconFont.DoesNotExist:
        return ''


class IconFormMixin(EntangledModelFormMixin):
    icon_font = ModelChoiceField(
        IconFont.objects.all(),
        label=_("Font"),
        initial=get_default_icon_font,
    )

    symbol = CharField(
        widget=widgets.HiddenInput(),
        label=_("Select Symbol"),
    )

    class Meta:
        entangled_fields = {'glossary': ['icon_font', 'symbol']}

    def __init__(self, *args, **kwargs):
        if not getattr(self, 'require_icon', True):
            self.declared_fields['icon_font'].required = False
            self.declared_fields['icon_font'].empty_label = _("No Icon")
            self.declared_fields['symbol'].required = False
        super().__init__(*args, **kwargs)
30.428571
72
0.676056
8ef00b6b469d5102e6dcc7ed6d7ed852a18ecb89
4,890
py
Python
release/scripts/addons/power_sequencer/operators/gap_remove.py
naetherm/Bforartists
4d78856b76544b9eeb49e7dd388b4cf41d58d7e4
[ "Naumen", "Condor-1.1", "MS-PL" ]
3
2019-09-16T10:29:19.000Z
2022-02-11T14:43:18.000Z
release/scripts/addons/power_sequencer/operators/gap_remove.py
naetherm/Bforartists
4d78856b76544b9eeb49e7dd388b4cf41d58d7e4
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
release/scripts/addons/power_sequencer/operators/gap_remove.py
naetherm/Bforartists
4d78856b76544b9eeb49e7dd388b4cf41d58d7e4
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
# # Copyright (C) 2016-2019 by Nathan Lovato, Daniel Oakey, Razvan Radulescu, and contributors # # This file is part of Power Sequencer. # # Power Sequencer is free software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # Power Sequencer is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; # without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with Power Sequencer. If # not, see <https://www.gnu.org/licenses/>. # import bpy from operator import attrgetter from .utils.functions import slice_selection, sequencer_workaround_2_80_audio_bug from .utils.doc import doc_name, doc_idname, doc_brief, doc_description class POWER_SEQUENCER_OT_gap_remove(bpy.types.Operator): """ Remove gaps, starting from the first frame, with the ability to ignore locked strips """ doc = { "name": doc_name(__qualname__), "demo": "", "description": doc_description(__doc__), "shortcuts": [], "keymap": "Sequencer", } bl_idname = doc_idname(__qualname__) bl_label = doc["name"] bl_description = doc_brief(doc["description"]) bl_options = {"REGISTER", "UNDO"} ignore_locked: bpy.props.BoolProperty( name="Ignore Locked Strips", description="Remove gaps without moving locked strips", default=True, ) all: bpy.props.BoolProperty( name="Remove All", description="Remove all gaps starting from the time cursor", default=False, ) frame: bpy.props.IntProperty( name="Frame", description="Frame to remove gaps from, defaults at the time cursor", default=-1, ) @classmethod def poll(cls, context): return context.sequences def execute(self, context): frame = self.frame if self.frame >= 0 else context.scene.frame_current sequences = ( [s for s in context.sequences if not s.lock] if self.ignore_locked else context.sequences ) sequences = [ s for s in sequences if s.frame_final_start >= frame or s.frame_final_end > frame ] sequence_blocks = slice_selection(context, sequences) if not sequence_blocks: return {"FINISHED"} gap_frame = self.find_gap_frame(context, frame, sequence_blocks[0]) if gap_frame == -1: return {"FINISHED"} first_block_start = min( sequence_blocks[0], key=attrgetter("frame_final_start") ).frame_final_start blocks_after_gap = ( sequence_blocks[1:] if first_block_start <= gap_frame else sequence_blocks ) self.gaps_remove(context, blocks_after_gap, gap_frame) sequencer_workaround_2_80_audio_bug(context) return {"FINISHED"} def find_gap_frame(self, context, frame, sorted_sequences): """ Takes a list sequences sorted by frame_final_start """ strips_start = min(sorted_sequences, key=attrgetter("frame_final_start")).frame_final_start strips_end = max(sorted_sequences, key=attrgetter("frame_final_end")).frame_final_end gap_frame = -1 if strips_start > frame: strips_before_frame_start = [s for s in context.sequences if s.frame_final_end <= frame] frame_target = 0 if strips_before_frame_start: frame_target = max( strips_before_frame_start, key=attrgetter("frame_final_end") ).frame_final_end gap_frame = frame_target if frame_target < strips_start else frame else: gap_frame = strips_end return gap_frame def gaps_remove(self, context, sequence_blocks, gap_frame_start): """ Recursively removes gaps between blocks of sequences """ gap_frame = gap_frame_start for block in 
sequence_blocks: gap_size = block[0].frame_final_start - gap_frame if gap_size < 1: continue for s in block: try: s.frame_start -= gap_size except AttributeError: continue self.move_markers(context, gap_frame, gap_size) if not self.all: break gap_frame = block[-1].frame_final_end def move_markers(self, context, gap_frame, gap_size): markers = (m for m in context.scene.timeline_markers if m.frame > gap_frame) for m in markers: m.frame -= min({gap_size, m.frame - gap_frame})
35.955882
100
0.645808
bcc7f93bb01538aa1954d7876aa2a74d5e59487c
19,266
py
Python
qiskit_metal/renderers/renderer_base/renderer_base.py
warrench/qiskit-metal
2a918a84f1969ecc75e86598f601ad41013fe409
[ "Apache-2.0" ]
1
2022-01-27T07:11:49.000Z
2022-01-27T07:11:49.000Z
qiskit_metal/renderers/renderer_base/renderer_base.py
warrench/qiskit-metal
2a918a84f1969ecc75e86598f601ad41013fe409
[ "Apache-2.0" ]
null
null
null
qiskit_metal/renderers/renderer_base/renderer_base.py
warrench/qiskit-metal
2a918a84f1969ecc75e86598f601ad41013fe409
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """QRenderer base class.""" import logging import inspect from copy import deepcopy from typing import TYPE_CHECKING from typing import List, Tuple, Union, Any, Iterable from typing import Dict as Dict_ from typing import List, Tuple, Union from qiskit_metal.designs import is_design from qiskit_metal.qgeometries import QGeometryTables from ... import Dict __all__ = ['QRenderer'] if TYPE_CHECKING: # For linting typechecking, import modules that can't be loaded here under normal conditions. # For example, I can't import QDesign, because it requires Qrenderer first. We have the # chicken and egg issue. from qiskit_metal.designs import QDesign class QRenderer(): """Abstract base class for all Renderers of Metal designs and their components and qgeometry. Handles: :: designs components qgeometry paths polys chips """ name = 'base' # overwrite this! """Name""" __loaded_renderers__ = set() __instantiated_renderers__ = dict() # overwrite this to add element extensions: see ELEMENT_COLUMNS # should be dict of dict with keys as element type, which contain (name, dype) pairs # e.g. element_extensions = dict( # base=dict(color=str, klayer=int), # path=dict(thickness=float, material=str, perfectE=bool), # poly=dict(thickness=float, material=str), ) element_extensions = dict() """Element extensions dictionary""" # TODO: To add: default parameters for the renderer for component element values. element_table_data = dict() @classmethod def load(cls): """Load the renderer and register all its extensions. Only performed once. Once complete, the renderer is added to the class attribute '__loaded_renderers__' of QRenderer Returns: bool: True if success, otherwise throws an error. """ # Check name name = cls.name if name in QRenderer.__loaded_renderers__: pass # print(f'Warning: Renderer name={name}, class={cls} already loaded. Doing nothing.') cls.populate_element_extensions() # Add element extensions # see docstring for QRenderer.element_extensions QGeometryTables.add_renderer_extension(cls.name, cls.element_extensions) # Moved to init for each renderer. # Add component extensions # to be used in the creation of default params for component qgeometry #raise NotImplementedError() # Finish and register officially as ready to use. QRenderer.__loaded_renderers__.add(name) # Reset the table for the next QRenderer. for table in cls.element_table_data.keys(): cls.element_extensions.pop(table, None) return True @classmethod def populate_element_extensions(cls): """Populate cls.element_extensions which will be used to create columns for tables in QGeometry tables. The structure of cls.element_table_data should be same as cls.element_extensions. """ for table, a_dict in cls.element_table_data.items(): cls.element_extensions[table] = dict() for col_name, col_value in a_dict.items(): # type will only tell out about the base class, won't tell you about the inheritance. cls.element_extensions[table][col_name] = type(col_value) @staticmethod def get_renderer(name: str): """Returns an already loaded and instantiated renderer. 
Args: name (str): rendering name Returns: QRenderer: Renderer with the given name """ if not name in QRenderer.__loaded_renderers__: print( 'ERROR: The renderer {name} has not yet been loaded. Please use the load function!' ) if not name in QRenderer.__instantiated_renderers__: print( 'ERROR: The renderer {name} has not yet been instantiated. Please instantiate the class!' ) return QRenderer.__instantiated_renderers__[name] def __init__(self, design: 'QDesign', initiate=True, render_template: Dict = None, render_options: Dict = None): """ Args: design (QDesign): The design initiate (bool): True to initiate the renderer. Defaults to True. render_template (Dict, optional): Typically used by GUI for template options for GDS. Defaults to None. render_options (Dict, optional): Used to override all options. Defaults to None. """ # TODO: check that the renderer has been loaded with load_renderer self.status = 'Not Init' assert is_design( design), "Erorr, for the design argument you must provide a\ a child instance of Metal QDesign class." self._design = design self.initiated = False if initiate: self.initate() # Register as an instantiated renderer. QRenderer.__instantiated_renderers__[self.name] = self # Options self._options = Dict() self.update_options(render_options=render_options, render_template=render_template) self.status = 'Init Completed' @property def options(self) -> Dict: """Options for the QRenderer.""" return self._options @property def design(self) -> 'QDesign': """Return a reference to the parent design object.""" return self._design @property def logger(self) -> logging.Logger: """Returns the logger.""" return self._design.logger @classmethod def _gather_all_children_default_options(cls) -> Dict: """From the base class of QRenderer, traverse the child classes to gather the .default_options for each child class. Note: If keys are the same for a child and grandchild, the grandchild will overwrite the child init method. Returns: Dict: Options from all children. """ options_from_children = Dict() parents = inspect.getmro(cls) # QRenderer is not expected to have default_options dict to add to QRenderer class. for child in parents[len(parents) - 2::-1]: # There is a developer agreement so the defaults for a renderer will be in a dict named default_options. if hasattr(child, 'default_options'): options_from_children = { **options_from_children, **child.default_options } return options_from_children @classmethod def _get_unique_class_name(cls) -> str: """Returns unique class name based on the module. Returns: str: Example: 'qiskit_metal.renders.renderer_gds.gds_renderer.QGDSRenderer' """ return f'{cls.__module__}.{cls.__name__}' @classmethod def _register_class_with_design(cls, design: 'QDesign', template_key: str, render_template: Dict): """Init function to register a renderer class with the design when first instantiated. Registers the renderer's template options. 
Args: design (QDesign): The parent design template_key (str): Key to use render_template (dict): template of render to copy """ # do not overwrite if template_key not in design.template_options: if not render_template: render_template = cls._gather_all_children_default_options() design.template_options[template_key] = deepcopy(render_template) @classmethod def get_template_options(cls, design: 'QDesign', render_template: Dict = None, logger_: logging.Logger = None, template_key: str = None) -> Dict: """Creates template options for the Metal QRenderer class required for the class to function, based on the design template; i.e., be created, made, and rendered. Provides the blank option structure required. The options can be extended by plugins, such as renderers. Args: design (QDesign): A design class. render_template (Dict, optional): Template options to overwrite the class ones. Defaults to None. logger_ (logging.Logger, optional): A logger for errors. Defaults to None. template_key (str, optional): The design.template_options key identifier. If None, then use _get_unique_class_name(). Defaults to None. Returns: Dict: Dictionary of renderer's default options based on design.template_options. """ # get key for templates if template_key is None: template_key = cls._get_unique_class_name() if template_key not in design.template_options: # Registers the renderer's template options. cls._register_class_with_design(design, template_key, render_template) # Only log warning, if template_key not registered within design. if template_key not in design.template_options: logger_ = logger_ or design.logger if logger_: logger_.error( f'ERROR in creating renderer {cls.__name__}!\nThe default ' f'options for the renderer class {cls.__name__} are missing' ) # Specific object render template options options = deepcopy(Dict(design.template_options[template_key])) return options def parse_value(self, value: Union[Any, List, Dict, Iterable]) -> Any: """Same as design.parse_value. See design for help. Returns: object: Parsed value of input. """ return self.design.parse_value(value) def update_options(self, render_options: Dict = None, render_template: Dict = None): """If template options has not been set for this renderer, then gather all the default options for children and add to design. The GUI would use this to store the template options. Then give the template options to render to store in self.options. Then user can over-ride the render_options. Args: render_options (Dict, optional): If user wants to over-ride the template options. Defaults to None. render_template (Dict, optional): All the template options for each child. Defaults to None. """ self.options.update( self.get_template_options(self.design, render_template=render_template)) if render_options: self.options.update(render_options) def add_table_data_to_QDesign(self, class_name: str): """During init of renderer, this needs to happen. In particular, each renderer needs to update custom columns and values within QDesign. Args: class_name (str): Name from cls.name for each renderer. """ status = set() if not isinstance(QRenderer.name, str): self.logger.warning( f'In add_table_data_to_QDesign, cls.str={QRenderer.name} is not a str.' 
) return for table, a_dict in self.element_table_data.items(): for col_name, col_value in a_dict.items(): status = self.design.add_default_data_for_qgeometry_tables( table, class_name, col_name, col_value) if 5 not in status: self.logger.warning( f'col_value={col_value} not added to QDesign') def initate(self, re_initiate=False): """Call any initiations steps required to be performed a single time before rendering, such as connecting to some API or COM, or importing the correct material libraries, etc. Overwrite `initiate_renderer`. Args: re_initiate (bool) : If False will only apply this function once. If True, will re-apply. Defaults to False. Returns: bool: was a re_initiation applied or not """ if not re_initiate: if self.initiated: return False self.initiated = True self._initate_renderer() return True def get_unique_component_ids( self, highlight_qcomponents: Union[list, None] = None) -> Tuple[list, int]: """Confirm the list doesn't have names of components repeated. Confirm that the name of component exists in QDesign. If QDesign doesn't contain any component, or if all components in QDesign are found in highlight_qcomponents, return an empty list; otherwise return a list of unique components to be sent to the renderer. The second returned item, an integer, specifies which of these 3 cases applies. Args: highlight_qcomponents (Union[list, None], optional): Components to render. Defaults to None. Returns: Tuple[list, int]: Empty or partial list of components in QDesign. """ highlight_qcomponents = highlight_qcomponents if highlight_qcomponents else [] unique_qcomponents = set(highlight_qcomponents) for qcomp in unique_qcomponents: if qcomp not in self.design.name_to_id: self.logger.warning( f'The component={qcomp} in highlight_qcomponents not' ' in QDesign.') return [], 2 # Invalid if len(unique_qcomponents) in (0, len(self.design.components)): return [], 1 # Everything selected return [self.design.name_to_id[elt] for elt in unique_qcomponents ], 0 # Subset selected def _initate_renderer(self): """Call any initiations steps required to be performed a single time before rendering, such as connecting to some API or COM, or importing the correct material libraries, etc. Returns: bool: Always returns True """ return True def post_render(self): """Any calls that one may want to make after a rendering is complete.""" pass def render_design(self): """Renders all design chips and components.""" self.initate() self.render_chips() self.render_components() # ... def render_chips(self, all_chips): """Render all chips of the design. Calls render_chip for each chip. Args: all_chips (list): All chip names to render. Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_chips' is # abstract in class 'QRenderer' but is not overridden, # have this method do something. type(all_chips) raise NotImplementedError() def render_chip(self, name): """Render the given chip. Args: name (str): Chip to render. Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_chip' is # abstract in class 'QRenderer' but is not overridden, # have this method do something. type(name) raise NotImplementedError() def render_components(self, selection=None): """Render all components of the design. If selection is none, then render all components. Args: selection (QComponent): Component to render. 
Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_component' # is abstract in class 'QRenderer' but is not overridden, # have this method do something. type(selection) raise NotImplementedError() def render_component(self, qcomponent): """Render the specified qcomponent. Args: qcomponent (QComponent): QComponent to render. Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_component' # is abstract in class 'QRenderer' but is not overridden, # have this method do something. type(qcomponent) raise NotImplementedError() def render_element(self, element): """Render the specified element. Args: element (Element): Element to render Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_element' is # abstract in class 'QRenderer' but is not overridden, # have this method do something. type(element) raise NotImplementedError() # if isinstance(element, path): # self.render_element_path(element) # elif isinstance(element, poly): # self.render_element_poly(element) # else: # self.logger.error('RENDERER ERROR: Unknown element {element}') def render_element_path(self, path): """Render an element path. Args: path (str): Path to render. Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_element_path' # is abstract in class 'QRenderer' but is not overridden, # have this method do something. type(path) raise NotImplementedError() def render_element_poly(self, poly): """Render an element poly. Args: poly (Poly): Poly to render Raises: NotImplementedError: Function not written yet """ # To avoid linting message in a subclass: Method 'render_element_poly' # is abstract in class 'QRenderer' but is not overridden # have this method do something. type(poly) raise NotImplementedError()
35.285714
116
0.615437
b1c5833621be43afabc52a6288e75669f34866d2
2,140
py
Python
octopose/nu.py
arunchandarQA/octopose
8227cf432560c72ea9a741991dda1f2ec4bca248
[ "MIT" ]
7
2018-02-14T15:42:52.000Z
2021-09-15T16:37:56.000Z
octopose/nu.py
arunchandarQA/octopose
8227cf432560c72ea9a741991dda1f2ec4bca248
[ "MIT" ]
42
2018-02-14T13:55:27.000Z
2022-02-12T08:34:29.000Z
octopose/nu.py
arunchandarQA/octopose
8227cf432560c72ea9a741991dda1f2ec4bca248
[ "MIT" ]
3
2018-03-14T10:58:14.000Z
2019-11-20T11:18:12.000Z
""" This module calls through to nuget to install NuGet packages""" # MIT License # # Copyright (c) 2018 Huddle # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os from octopose import config class Nu: def __init__(self, subprocess_runner): """Nu interacts with nuget.exe by running commands in a subprocess""" self.subprocess_runner = subprocess_runner self.nuget_exe = "{0}\\third_party\\NuGet.exe".format( os.path.dirname(os.path.abspath(__file__))) def get_deployable(self, name, version, staging_location): """ Get deployables from pacakage sources for local deployment. """ for source in config.PACKAGE_SOURCES: args = "{0} install {1} -Source {2} -OutputDirectory {3}".format(self.nuget_exe, name, source, staging_location) if version is not None: args = args + " -Version {0}".format(version) self.subprocess_runner.run(args, "Getting of {0} at version {1} failed".format( name, version), self.nuget_exe)
45.531915
106
0.693925
5062c15281fc70daa707352e470aac7e5d364116
8,107
py
Python
apps/authentication/api/views.py
mariusaarsnes/onlineweb4
3495321dabfd7a7236e6d841b004e9f855b6f30e
[ "MIT" ]
null
null
null
apps/authentication/api/views.py
mariusaarsnes/onlineweb4
3495321dabfd7a7236e6d841b004e9f855b6f30e
[ "MIT" ]
null
null
null
apps/authentication/api/views.py
mariusaarsnes/onlineweb4
3495321dabfd7a7236e6d841b004e9f855b6f30e
[ "MIT" ]
null
null
null
from django.contrib.auth.models import Group from guardian.shortcuts import get_objects_for_user from rest_framework import mixins, status, viewsets from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.response import Response from apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup from apps.authentication.models import OnlineUser as User from apps.authentication.models import Position, SpecialPosition from apps.authentication.serializers import (EmailCreateSerializer, EmailReadOnlySerializer, EmailUpdateSerializer, GroupMemberCreateSerializer, GroupMemberReadOnlySerializer, GroupMemberUpdateSerializer, GroupReadOnlySerializer, GroupRoleReadOnlySerializer, OnlineGroupCreateOrUpdateSerializer, OnlineGroupReadOnlySerializer, PositionCreateAndUpdateSerializer, PositionReadOnlySerializer, SpecialPositionSerializer, UserCreateSerializer, UserReadOnlySerializer, UserUpdateSerializer) class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.CreateModelMixin, mixins.UpdateModelMixin): """ Viewset for User serializer. Supports filtering on 'first_name', 'last_name', 'email' """ permission_classes = (AllowAny,) filterset_fields = ('first_name', 'last_name', 'rfid',) def get_queryset(self): """ Permitted users can view users they have permission for to view. Users can only update/delete their own users. """ user = self.request.user if self.action in ['list', 'retrieve']: if not user.is_authenticated: return User.objects.none() if user.is_superuser: return User.objects.all() return get_objects_for_user(user, 'authentication.view_onlineuser') if self.action in ['list', 'retrieve', 'destroy']: return User.objects.filter(user=user) return super().get_queryset() def get_serializer_class(self): if self.action == 'create': return UserCreateSerializer if self.action in ['update', 'partial_update']: return UserUpdateSerializer if self.action in ['list', 'retrieve']: return UserReadOnlySerializer return super().get_serializer_class() class EmailViewSet(viewsets.ModelViewSet): permission_classes = (IsAuthenticated,) def get_queryset(self): return Email.objects.filter(user=self.request.user) def get_serializer_class(self): if self.action == 'create': return EmailCreateSerializer if self.action in ['update', 'partial_update']: return EmailUpdateSerializer if self.action in ['list', 'retrieve']: return EmailReadOnlySerializer return super().get_serializer_class() def destroy(self, request, *args, **kwargs): instance: Email = self.get_object() if instance.primary: return Response({ 'message': 'Du kan ikke slette en primær-epost. Du må først velge en annen epost som ' 'primær for å kunne slette denne.' 
}, status=status.HTTP_400_BAD_REQUEST) class PositionViewSet(viewsets.ModelViewSet): permission_classes = (IsAuthenticated,) def get_serializer_class(self): if self.action in ['create', 'update', 'partial_update']: return PositionCreateAndUpdateSerializer if self.action in ['list', 'retrieve']: return PositionReadOnlySerializer return super().get_serializer_class() def get_queryset(self): user = self.request.user return Position.objects.filter(user=user) class SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet): serializer_class = SpecialPositionSerializer permission_classes = (IsAuthenticated,) def get_queryset(self): user = self.request.user return SpecialPosition.objects.filter(user=user) class GroupViewSet(viewsets.ReadOnlyModelViewSet): permission_classes = (AllowAny,) queryset = Group.objects.all() serializer_class = GroupReadOnlySerializer class OnlineGroupViewSet(viewsets.ModelViewSet): permission_classes = (AllowAny,) @staticmethod def get_editable_groups_for_user(user: User, action: str): if not user.is_authenticated: return OnlineGroup.objects.none() all_groups = OnlineGroup.objects.all() """ Updating or deleting requires admin rights or a leader role """ if action in ['create', 'update', 'partial_update', 'destroy']: if user.is_superuser: return all_groups """ Group leaders or deputy leaders should be allowed to edit groups """ allowed_group_ids = [ group.id for group in all_groups if group.leader == user or group.deputy_leader == user ] return OnlineGroup.objects.filter(pk__in=allowed_group_ids) def get_queryset(self): if self.action in ['list', 'retrieve']: return OnlineGroup.objects.all() user = self.request.user return self.get_editable_groups_for_user(user, self.action) def get_serializer_class(self): if self.action == 'create': return OnlineGroupCreateOrUpdateSerializer if self.action in ['update', 'partial_update']: return OnlineGroupCreateOrUpdateSerializer if self.action in ['list', 'retrieve']: return OnlineGroupReadOnlySerializer return super().get_serializer_class() def create(self, request, *args, **kwargs): if request.user.is_superuser or request.user.has_perm('authentication.add_onlinegroup'): return super().create(request, *args, **kwargs) return Response({ 'message': 'Du har ikke tillatelse til å opprette grupper' }, status=status.HTTP_403_FORBIDDEN) class GroupMemberViewSet(viewsets.ModelViewSet): permission_classes = (AllowAny,) @staticmethod def get_allowed_memberships_for_user(user: User, action: str): allowed_groups = OnlineGroupViewSet.get_editable_groups_for_user(user, action) allowed_membership_ids = [] for group in allowed_groups: for membership in group.members.all(): allowed_membership_ids.append(membership.id) return GroupMember.objects.filter(pk__in=allowed_membership_ids) def get_queryset(self): if self.action in ['list', 'retrieve']: return GroupMember.objects.all() user = self.request.user return self.get_allowed_memberships_for_user(user, self.action) def get_serializer_class(self): if self.action == 'create': return GroupMemberCreateSerializer if self.action in ['update', 'partial_update']: return GroupMemberUpdateSerializer if self.action in ['list', 'retrieve']: return GroupMemberReadOnlySerializer return super().get_serializer_class() def create(self, request, *args, **kwargs): if request.user.is_superuser or request.user.has_perm('authentication.add_groupmember'): return super().create(request, *args, **kwargs) return Response({ 'message': 'Du har ikke tillatelse til å opprette gruppemedlemskap' }, status=status.HTTP_403_FORBIDDEN) class 
GroupRoleViewSet(viewsets.ReadOnlyModelViewSet): permission_classes = (AllowAny,) serializer_class = GroupRoleReadOnlySerializer queryset = GroupRole.objects.all()
38.240566
103
0.647959
f801c16058f6dc3fc4a17dea54bf8f6dd6379b13
118
py
Python
mesos_master/datadog_checks/mesos_master/__init__.py
dvanderveer/integrations-core
41dd9950296455457c9b7342584153678503d5aa
[ "BSD-3-Clause" ]
null
null
null
mesos_master/datadog_checks/mesos_master/__init__.py
dvanderveer/integrations-core
41dd9950296455457c9b7342584153678503d5aa
[ "BSD-3-Clause" ]
null
null
null
mesos_master/datadog_checks/mesos_master/__init__.py
dvanderveer/integrations-core
41dd9950296455457c9b7342584153678503d5aa
[ "BSD-3-Clause" ]
null
null
null
from . import mesos_master

MesosMaster = mesos_master.MesosMaster

__version__ = "1.2.0"

__all__ = ['mesos_master']
14.75
38
0.754237
786b80a550fac89c9122030b9aedeb473be4a0b4
8,748
py
Python
src/cpu/pred/BranchPredictor.py
LDelhez/Gem5
28ad7cf87c60ad74192caf4ec8771a7060da7dc0
[ "BSD-3-Clause" ]
1
2019-01-16T03:54:01.000Z
2019-01-16T03:54:01.000Z
src/cpu/pred/BranchPredictor.py
LDelhez/Gem5
28ad7cf87c60ad74192caf4ec8771a7060da7dc0
[ "BSD-3-Clause" ]
null
null
null
src/cpu/pred/BranchPredictor.py
LDelhez/Gem5
28ad7cf87c60ad74192caf4ec8771a7060da7dc0
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2012 Mark D. Hill and David A. Wood # Copyright (c) 2015 The University of Wisconsin # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Nilay Vaish and Dibakar Gope from m5.SimObject import SimObject from m5.params import * from m5.proxy import * class BranchPredictor(SimObject): type = 'BranchPredictor' cxx_class = 'BPredUnit' cxx_header = "cpu/pred/bpred_unit.hh" abstract = True numThreads = Param.Unsigned(Parent.numThreads, "Number of threads") BTBEntries = Param.Unsigned(4096, "Number of BTB entries") BTBTagSize = Param.Unsigned(16, "Size of the BTB tags, in bits") RASSize = Param.Unsigned(16, "RAS size") instShiftAmt = Param.Unsigned(2, "Number of bits to shift instructions by") useIndirect = Param.Bool(True, "Use indirect branch predictor") indirectHashGHR = Param.Bool(True, "Hash branch predictor GHR") indirectHashTargets = Param.Bool(True, "Hash path history targets") indirectSets = Param.Unsigned(256, "Cache sets for indirect predictor") indirectWays = Param.Unsigned(2, "Ways for indirect predictor") indirectTagSize = Param.Unsigned(16, "Indirect target cache tag bits") indirectPathLength = Param.Unsigned(3, "Previous indirect targets to use for path history") storage = Param.Unsigned(0, "Total storage") class LocalBP(BranchPredictor): type = 'LocalBP' cxx_class = 'LocalBP' cxx_header = "cpu/pred/2bit_local.hh" localPredictorSize = Param.Unsigned(2048, "Size of local predictor") localCtrBits = Param.Unsigned(2, "Bits per counter") class TournamentBP(BranchPredictor): type = 'TournamentBP' cxx_class = 'TournamentBP' cxx_header = "cpu/pred/tournament.hh" localPredictorSize = Param.Unsigned(2048, "Size of local predictor") localCtrBits = Param.Unsigned(2, "Bits per counter") localHistoryTableSize = Param.Unsigned(2048, "size of local history table") globalPredictorSize = Param.Unsigned(8192, "Size of global predictor") globalCtrBits = Param.Unsigned(2, "Bits per counter") choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor") choiceCtrBits = Param.Unsigned(2, "Bits of choice counters") class BiModeBP(BranchPredictor): type = 
'BiModeBP' cxx_class = 'BiModeBP' cxx_header = "cpu/pred/bi_mode.hh" globalPredictorSize = Param.Unsigned(8192, "Size of global predictor") globalCtrBits = Param.Unsigned(2, "Bits per counter") choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor") choiceCtrBits = Param.Unsigned(2, "Bits of choice counters") # TAGE branch predictor as described in https://www.jilp.org/vol8/v8paper1.pdf # The default sizes below are for the 8C-TAGE configuration (63.5 Kbits) class TAGE(BranchPredictor): type = 'TAGE' cxx_class = 'TAGE' cxx_header = "cpu/pred/tage.hh" nHistoryTables = Param.Unsigned(7, "Number of history tables") minHist = Param.Unsigned(5, "Minimum history size of LTAGE") maxHist = Param.Unsigned(130, "Maximum history size of LTAGE") tagTableTagWidths = VectorParam.Unsigned( [0, 9, 9, 10, 10, 11, 11, 12], "Tag size in TAGE tag tables") logTagTableSizes = VectorParam.Int( [13, 9, 9, 9, 9, 9, 9, 9], "Log2 of TAGE table sizes") logRatioBiModalHystEntries = Param.Unsigned(2, "Log num of prediction entries for a shared hysteresis bit " \ "for the Bimodal") tagTableCounterBits = Param.Unsigned(3, "Number of tag table counter bits") tagTableUBits = Param.Unsigned(2, "Number of tag table u bits") histBufferSize = Param.Unsigned(2097152, "A large number to track all branch histories(2MEntries default)") pathHistBits = Param.Unsigned(16, "Path history size") logUResetPeriod = Param.Unsigned(18, "Log period in number of branches to reset TAGE useful counters") useAltOnNaBits = Param.Unsigned(4, "Size of the USE_ALT_ON_NA counter") # LTAGE branch predictor as described in # https://www.irisa.fr/caps/people/seznec/L-TAGE.pdf # It is basically a TAGE predictor plus a loop predictor # The differnt TAGE sizes are updated according to the paper values (256 Kbits) class LTAGE(TAGE): type = 'LTAGE' cxx_class = 'LTAGE' cxx_header = "cpu/pred/ltage.hh" nHistoryTables = 12 minHist = 4 maxHist = 640 tagTableTagWidths = [0, 7, 7, 8, 8, 9, 10, 11, 12, 12, 13, 14, 15] logTagTableSizes = [14, 10, 10, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9] logUResetPeriod = 19 logSizeLoopPred = Param.Unsigned(8, "Log size of the loop predictor") withLoopBits = Param.Unsigned(7, "Size of the WITHLOOP counter") loopTableAgeBits = Param.Unsigned(8, "Number of age bits per loop entry") loopTableConfidenceBits = Param.Unsigned(2, "Number of confidence bits per loop entry") loopTableTagBits = Param.Unsigned(14, "Number of tag bits per loop entry") loopTableIterBits = Param.Unsigned(14, "Nuber of iteration bits per loop") logLoopTableAssoc = Param.Unsigned(2, "Log loop predictor associativity") class StatisticalCorrector(BranchPredictor): type = 'StatisticalCorrector' cxx_class = 'StatisticalCorrector' cxx_header = "cpu/pred/statistical_corrector.hh" logSize = Param.Unsigned(12, "Log size of the corrector") numTables = Param.Unsigned(5, "Number of tables") tableEntryBits = Param.Unsigned(6, "Number of bits per entry") basePredictor = Param.BranchPredictor(TAGE(), "Base predictor") class LStatisticalCorrector(BranchPredictor): type = 'LStatisticalCorrector' cxx_class = 'LStatisticalCorrector' cxx_header = "cpu/pred/local_statistical_corrector.hh" numHistories = Param.Unsigned(64, "Number of local histories") numTables = Param.Unsigned(5, "Number of tables") entryBits = Param.Unsigned(6, "Number of bits per entry") localHistoryLengths = VectorParam.Unsigned( [0, 4, 10, 17, 31], "Lengths of the local histories") logSize = Param.Unsigned(10, "Log size of the corrector") basePredictor = Param.BranchPredictor(TAGE(), "Base 
predictor") class IUM(BranchPredictor): type = 'IUM' cxx_class = 'IUM' cxx_header = "cpu/pred/ium.hh" logSize = Param.Unsigned(6, "Log size of the ium.") basePredictor = Param.BranchPredictor(TAGE(), "Base predictor") class STAGE(StatisticalCorrector): pass class ITAGE(IUM): pass class ISLTAGE(STAGE): basePredictor = ITAGE() class TAGELSC(LStatisticalCorrector): basePredictor = ITAGE() class LoopPredictor(BranchPredictor): type = 'LoopPredictor' cxx_class = 'LoopPredictor' cxx_header = "cpu/pred/loop_predictor.hh" logSizeLoopPred = Param.Unsigned(8, "Log size of the loop predictor") withLoopBits = Param.Unsigned(7, "Size of the WITHLOOP counter") loopTableAgeBits = Param.Unsigned(8, "Number of age bits per loop entry") loopTableConfidenceBits = Param.Unsigned(2, "Number of confidence bits per loop entry") loopTableTagBits = Param.Unsigned(14, "Number of tag bits per loop entry") loopTableIterBits = Param.Unsigned(14, "Nuber of iteration bits per loop") logLoopTableAssoc = Param.Unsigned(2, "Log loop predictor associativity") basePredictor = Param.BranchPredictor(TAGE(), "Base predictor")
42.673171
79
0.72588
e5ef8cf54c6cc0a2e89a1b1ef26df8e37cf85eeb
2,246
py
Python
CTFd/api/v1/tags.py
atti1a/CTFd
6c5c63d667a17aec159c8e26ea53dccfbc4d0fa3
[ "Apache-2.0" ]
501
2019-11-22T07:19:06.000Z
2022-03-28T07:16:40.000Z
CTFd/api/v1/tags.py
atti1a/CTFd
6c5c63d667a17aec159c8e26ea53dccfbc4d0fa3
[ "Apache-2.0" ]
58
2019-12-02T13:59:15.000Z
2022-02-26T01:53:52.000Z
CTFd/api/v1/tags.py
atti1a/CTFd
6c5c63d667a17aec159c8e26ea53dccfbc4d0fa3
[ "Apache-2.0" ]
128
2019-12-02T11:15:58.000Z
2022-03-27T08:25:59.000Z
from flask import request
from flask_restplus import Namespace, Resource

from CTFd.models import db, Tags
from CTFd.schemas.tags import TagSchema
from CTFd.utils.decorators import admins_only

tags_namespace = Namespace("tags", description="Endpoint to retrieve Tags")


@tags_namespace.route("")
class TagList(Resource):
    @admins_only
    def get(self):
        # TODO: Filter by challenge_id
        tags = Tags.query.all()
        schema = TagSchema(many=True)
        response = schema.dump(tags)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}

    @admins_only
    def post(self):
        req = request.get_json()
        schema = TagSchema()
        response = schema.load(req, session=db.session)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        db.session.add(response.data)
        db.session.commit()

        response = schema.dump(response.data)
        db.session.close()

        return {"success": True, "data": response.data}


@tags_namespace.route("/<tag_id>")
@tags_namespace.param("tag_id", "A Tag ID")
class Tag(Resource):
    @admins_only
    def get(self, tag_id):
        tag = Tags.query.filter_by(id=tag_id).first_or_404()
        response = TagSchema().dump(tag)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}

    @admins_only
    def patch(self, tag_id):
        tag = Tags.query.filter_by(id=tag_id).first_or_404()
        schema = TagSchema()
        req = request.get_json()

        response = schema.load(req, session=db.session, instance=tag)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        db.session.commit()

        response = schema.dump(response.data)
        db.session.close()

        return {"success": True, "data": response.data}

    @admins_only
    def delete(self, tag_id):
        tag = Tags.query.filter_by(id=tag_id).first_or_404()
        db.session.delete(tag)
        db.session.commit()
        db.session.close()

        return {"success": True}
27.728395
75
0.628673
b30d46ffe7ad65c08c29223595e94079b496c23d
969
py
Python
pdseg/utils/paddle_utils.py
windstamp/PaddleSeg
828808ea306adf2e8b94c291b77e7b7cf558bc2a
[ "ECL-2.0", "Apache-2.0" ]
3
2021-03-17T04:14:06.000Z
2021-07-15T04:11:41.000Z
pdseg/utils/paddle_utils.py
windstamp/PaddleSeg
828808ea306adf2e8b94c291b77e7b7cf558bc2a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
pdseg/utils/paddle_utils.py
windstamp/PaddleSeg
828808ea306adf2e8b94c291b77e7b7cf558bc2a
[ "ECL-2.0", "Apache-2.0" ]
2
2021-08-04T02:48:50.000Z
2021-11-16T08:13:41.000Z
# coding: utf8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle


def enable_static():
    if hasattr(paddle, 'enable_static'):
        paddle.enable_static()


def save_op_version_info(program_desc):
    if hasattr(paddle.fluid.core, 'save_op_version_info'):
        paddle.fluid.core.save_op_version_info(program_desc)
    else:
        paddle.fluid.core.save_op_compatible_info(program_desc)
33.413793
74
0.753354
ce6cc91222ffeb44355cbfe13bdaa81b2f1421ed
4,076
py
Python
python/examples/iree/simple_matmul.py
ingomueller-net/iree-llvm-sandbox
decba32e6c5fffd4dbeccf15f50b65d7c1335500
[ "Apache-2.0" ]
null
null
null
python/examples/iree/simple_matmul.py
ingomueller-net/iree-llvm-sandbox
decba32e6c5fffd4dbeccf15f50b65d7c1335500
[ "Apache-2.0" ]
null
null
null
python/examples/iree/simple_matmul.py
ingomueller-net/iree-llvm-sandbox
decba32e6c5fffd4dbeccf15f50b65d7c1335500
[ "Apache-2.0" ]
null
null
null
import numpy as np import logging from iree import runtime as ireert from iree.compiler import compile_str import iree.compiler.tools import iree.compiler.dialects.transform as transform import iree.compiler.dialects.pdl as pdl import iree.compiler.ir as ir # This example is identical to `simple_matmul_sequential.py`, but uses MLIR # Python bindings to create ops. module_str = None with ir.Context() as ctx, ir.Location.unknown(ctx): transform.register_dialect(ctx) module = ir.Module.create() with ir.InsertionPoint(module.body): isa_matmul = pdl.PatternOp(benefit = 1, name = "isa_matmul") with ir.InsertionPoint(isa_matmul.body): args = pdl.OperandsOp() types = pdl.TypesOp() pdl_op = pdl.OperationOp(args=[args], types=[types]) op_name = pdl.AttributeOp(value=ir.StringAttr.get("linalg.matmul")) pdl.ApplyNativeConstraintOp("isEquivalentToOp", args=[pdl_op, op_name]) pdl.RewriteOp(pdl_op, "transform.apply") transform_sequence = transform.SequenceOp() with ir.InsertionPoint(transform_sequence.body.blocks[0]): ir.Operation.create(name="transform.iree.set_num_workgroups_to_one") target_match = transform.MatchOp(ir.FlatSymbolRefAttr.get('isa_matmul')) # TODO: fuse... tiled = transform.TileOp(target=target_match, sizes=[8, 32, 8], interchange=[0, 1, 2]) transform.PeelLoopOp(tiled.results[1]) transform.PeelLoopOp(tiled.results[2]) # TODO: Match dynamic matmul and scalarize. transform.VectorizeOp(vectorize_padding=False) ir.Operation.create(name="transform.iree.bufferize") stages = [] for i in range(1, 8): stages.append(i) transform.LowerVectorsOp(contraction_lowering="outerproduct", multireduction_lowering="innerparallel", split_transfers="linalg-copy", stages=stages, transpose_avx2_lowering=False, transpose_lowering="eltwise", unroll_vector_transfers=True) transform.YieldOp([]) module_str = str(module) ################################################################################ # Hardcoded strategy with the schedule dialect to drive IREE through a file. ################################################################################ TRANSFORM_SPEC_FILE_NAME = "/tmp/linalg_transform_spec.mlir" with open(TRANSFORM_SPEC_FILE_NAME, "w") as f: f.write(module_str) ################################################################################ # END Hardcoded strategy with the schedule dialect to drive IREE through a file. ################################################################################ # Compile a module. DOT_ASM = """ func @dot(%lhs: tensor<127x128xf32>, %rhs: tensor<128x129xf32>) -> tensor<127x129xf32> { %0 = "mhlo.dot"(%lhs, %rhs) : (tensor<127x128xf32>, tensor<128x129xf32>) -> tensor<127x129xf32> return %0 : tensor<127x129xf32> } """ binary = iree.compiler.tools.compile_str( DOT_ASM, input_type="mhlo", target_backends=["dylib"], extra_args=[ '--iree-codegen-use-linalg-transform-interp', '--linalg-transform-file-name=' + TRANSFORM_SPEC_FILE_NAME, #'-mlir-print-ir-after-all', #'-mlir-print-ir-after-change', ]) print(f'Flatbuffer size = {len(binary)}') with open('/tmp/binary.vfmb', "wb") as f: f.write(binary) # Register the module with a runtime context. # Use the CPU interpreter (which has the most implementation done): config = ireert.Config("dylib") ctx = ireert.SystemContext(config=config) vm_module = ireert.VmModule.from_flatbuffer(binary) ctx.add_vm_module(vm_module) # Invoke the function and print the result. 
lhs = np.full((127, 128), 1, dtype=np.float32) rhs = np.full((128, 129), 2, dtype=np.float32) dot = ctx.modules.module.dot res = dot() np.testing.assert_allclose(res, np.dot(lhs, rhs)) print('SUCCESS')
37.740741
100
0.619235
5abf66f4c23bf81a0a1d4e9ecd9b2658eb037acf
49,306
py
Python
tests/t5/test_modeling_tf_t5.py
bhavika/transformers
65cf33e7e53cd46313f3655f274b3f6ca0fd679d
[ "Apache-2.0" ]
1
2022-03-14T12:05:34.000Z
2022-03-14T12:05:34.000Z
tests/t5/test_modeling_tf_t5.py
bhavika/transformers
65cf33e7e53cd46313f3655f274b3f6ca0fd679d
[ "Apache-2.0" ]
2
2022-03-14T10:13:16.000Z
2022-03-14T11:50:27.000Z
tests/t5/test_modeling_tf_t5.py
bhavika/transformers
65cf33e7e53cd46313f3655f274b3f6ca0fd679d
[ "Apache-2.0" ]
2
2022-03-21T04:32:39.000Z
2022-03-22T01:02:49.000Z
# coding=utf-8 # Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import T5Config, is_tf_available from transformers.file_utils import cached_property from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ..test_configuration_common import ConfigTester from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model class TFT5ModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_labels = True self.vocab_size = 99 self.n_positions = 14 self.hidden_size = 32 self.num_hidden_layers = 5 self.num_attention_heads = 4 self.d_ff = 37 self.relative_attention_num_buckets = 8 self.dropout_rate = 0.1 self.initializer_factor = 0.002 self.eos_token_id = 1 self.pad_token_id = 0 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = T5Config( vocab_size=self.vocab_size, n_positions=self.n_positions, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, ) return (config, input_ids, input_mask, token_labels) def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels): model = TFT5Model(config=config) inputs = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } result = model(inputs) result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size]) # There should be `num_layers` key value embeddings stored in decoder_past[1] self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past[1] tuple self.parent.assertEqual(len(decoder_past[0]), 4) def 
create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels): model = TFT5ForConditionalGeneration(config=config) inputs_dict = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } result = model(inputs_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask): model = TFT5Model(config=config).get_decoder() input_ids = input_ids[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids)[0] output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_t5_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask ): model = TFT5Model(config=config).get_decoder() # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0] output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def 
create_and_check_t5_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask ): model = TFT5Model(config=config).get_decoder() input_ids = input_ids[:1, :] attention_mask = attention_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, token_labels) = config_and_inputs inputs_dict = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } return config, inputs_dict @require_tf class TFT5ModelTest(TFModelTesterMixin, unittest.TestCase): is_encoder_decoder = True all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else () test_onnx = False def setUp(self): self.model_tester = TFT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_t5_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_model(*config_and_inputs) def test_t5_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:]) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs) def test_t5_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs) def test_t5_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs) def test_t5_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) assert 
isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in self.all_generative_model_classes: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert name is None else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_saved_model_creation(self): # This test is too long (>30sec) and makes fail the CI pass @slow def test_model_from_pretrained(self): model = TFT5Model.from_pretrained("t5-small") self.assertIsNotNone(model) def test_generate_with_headmasking(self): # TODO: Fix head-masking according to PyTorch T5 model pass @slow def test_resize_embeddings(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") original_vocab_size = model.get_input_embeddings().weight.shape[0] # the vocab size is defined in the model config self.assertEqual(original_vocab_size, model.config.vocab_size) tokenizer = T5Tokenizer.from_pretrained("t5-small") tokenizer.add_special_tokens({"bos_token": "", "eos_token": ""}) model._resize_token_embeddings(len(tokenizer)) # the vocab size is now resized to the length of the tokenizer, which is different from the original size self.assertEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer)) self.assertNotEqual(model.get_input_embeddings().weight.shape[0], original_vocab_size) class TFT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = TFT5EncoderModel(config=config) result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.shape, 
(self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase): is_encoder_decoder = False all_model_classes = (TFT5EncoderModel,) if is_tf_available() else () test_onnx = False def setUp(self): self.model_tester = TFT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # is not able to be part of a pipeline def test_train_pipeline_custom_model(self): pass @require_tf @require_sentencepiece @require_tokenizers class TFT5GenerationIntegrationTests(unittest.TestCase): @slow def test_greedy_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = ["Yesterday, my name was", "Today is a beautiful day and"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "do_sample": False, "repetition_penalty": 2.2, } output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["Yesterday, my name was", "Heute ist ein schöne Tag und"] self.assertListEqual(expected_output_string, output_strings) @slow def test_sample_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "do_sample": True, "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "repetition_penalty": 2.2, "temperature": 0.8, "top_k": 500, "top_p": 0.9, } # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): tf.random.set_seed(42) # deterministic sampling sequence -> deterministic generation output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["i love her I really love my heart", "die Transformatoren sind wirklich erstaunlich"] self.assertListEqual(expected_output_string, output_strings) @require_tf @require_sentencepiece @require_tokenizers class TFT5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return TFT5ForConditionalGeneration.from_pretrained("t5-base") @slow def test_small_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, 
extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_sum(loss).numpy() EXPECTED_SCORE = -19.0845 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1.1_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1.1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small") tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_sum(loss).numpy() EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_sum(loss).numpy() EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = T5Tokenizer.from_pretrained("t5-base") FRANCE_ARTICLE = 'Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. 
"Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. 
In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa SHORTER_ARTICLE = '(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. 
Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.' IRAN_ARTICLE = "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. 
It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \"advise and consent.\" But the issue is not suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. 
The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions." ARTICLE_SUBWAY = 'New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.' expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a cell phone video of the final seconds . "one can hear cries of \'My God\' in several languages," one magazine says .', "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a preliminary examination into the situation in the occupied Palestinian territory . 
as members of the court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller: the debate that has already begun since the announcement of the new framework will likely result in more heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and implement a rigorous inspection regime .", 'prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two criminal counts of "offering a false instrument for filing in the first degree" she has been married 10 times, with nine of her marriages occurring between 1999 and 2002 .', ] task_specific_config = getattr(model.config, "task_specific_params", {}) summarization_config = task_specific_config.get("summarization", {}) model.config.update(summarization_config) dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], max_length=512, padding="max_length", truncation=True, return_tensors="tf", ) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = [ tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch ] self.assertListEqual( expected_summaries, decoded, ) @slow def test_translation_en_to_de(self): tok = T5Tokenizer.from_pretrained("t5-base") model = self.model task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_de", {}) self.model.config.update(translation_config) original_input = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.' ) input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf") output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=50, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model tok = T5Tokenizer.from_pretrained("t5-base") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_fr", {}) model.config.update(translation_config) en_text = ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of countless generations of stars: the oldest stars are seen as blue dots. ' new_truncated_translation = ( "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." 
) input_ids = tok(model.config.prefix + en_text, return_tensors="tf").input_ids output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = T5Tokenizer.from_pretrained("t5-base") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_ro", {}) model.config.update(translation_config) original_input = "Taco Bell said it plans to add 2,000 locations in the US by 2022." expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022." input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf") output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=50, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) def test_finetune_keras_trainer(self): """Ensure that the model can be fine-tuned via the keras API and that metrics work as expected. """ # This metric expects to be called with the logits output def _accuracy(y_true, y_pred): return tf.keras.metrics.sparse_categorical_crossentropy(y_true[:, 0], y_pred[:, 0]) # measure the accuracy of the first token class FirstTokenAccuracy(tf.keras.metrics.MeanMetricWrapper): def __init__(self, name="accuracy", **kwargs): super().__init__(_accuracy, name=name, **kwargs) model = self.model model.compile("adam", metrics=FirstTokenAccuracy()) tokenizer = T5Tokenizer.from_pretrained("t5-small") examples = [ ("sentiment: Everything is awesome!", "positive"), ("sentiment: Tensorflow datasets are hard to use", "negative"), ] inputs = dict(tokenizer([x[0] for x in examples], padding=True, return_tensors="tf")) inputs["labels"] = tokenizer([x[1] for x in examples], return_tensors="tf").input_ids model.fit(inputs) m = model.evaluate(inputs) self.assertEqual(len(m), 2)
64.284224
7,207
0.727356
657dc5473bb5d09fd8d5e88388ec4de5022cb23c
3,156
py
Python
lessons/3/homework/calculator_finished.py
reedcwilson/programming-fundamentals
d381bae21a3c16ba6fe3bf214557ff9a8d932ed0
[ "MIT" ]
null
null
null
lessons/3/homework/calculator_finished.py
reedcwilson/programming-fundamentals
d381bae21a3c16ba6fe3bf214557ff9a8d932ed0
[ "MIT" ]
null
null
null
lessons/3/homework/calculator_finished.py
reedcwilson/programming-fundamentals
d381bae21a3c16ba6fe3bf214557ff9a8d932ed0
[ "MIT" ]
2
2015-06-18T02:24:12.000Z
2018-07-14T04:56:54.000Z
#!/usr/bin/env python

import os
import sys


## Write functions for each operation
def add(first, second):
    return first + second


def add_three(first, second, third):
    return operate_three(first, second, third, add)


def subtract(first, second):
    return first - second


def subtract_three(first, second, third):
    return operate_three(first, second, third, subtract)


def multiply(first, second):
    return first * second


def multiply_three(first, second, third):
    return operate_three(first, second, third, multiply)


def divide(first, second):
    try:
        return first / second
    except ZeroDivisionError:
        print 'Dividing by zero has the potential to destroy the universe.'
        print 'Quitting before any irreversible damage is incurred.'
        sys.exit()


def divide_three(first, second, third):
    return operate_three(first, second, third, divide)


def operate_three(first, second, third, func):
    return func(func(first, second), third)


def exponent(num, power):
    return num**power


def get_num(string):
    try:
        return int(string)
    except Exception:
        print 'I was unable to parse your input. Please enter a number next time'
        sys.exit()


def print_result(num):
    print 'The answer is: %s' % (num)


def main():
    # clear the console screen
    os.system('clear')

    ## have the user choose an operator
    print 'Choose your operator:'
    print '1. +\n2. -\n3. *\n4. /\n5. ^'
    operator = raw_input()

    ## ask for two or three operands
    num_operands = get_num(raw_input('Two or three operands: '))

    ## get the first and second operands
    operand_1 = get_num(raw_input('Please enter the first operand: '))
    operand_2 = get_num(raw_input('Please enter the second operand: '))

    ## get the third operand if they wanted three
    operand_3 = None
    if num_operands == 3:
        operand_3 = get_num(raw_input('Please enter the third operand: '))

    ## call the appropriate function based on their operator and number of operands
    if operator == '1':
        if operand_3:
            print_result(add_three(operand_1, operand_2, operand_3))
        else:
            print_result(add(operand_1, operand_2))
    if operator == '2':
        if operand_3:
            print_result(subtract_three(operand_1, operand_2, operand_3))
        else:
            print_result(subtract(operand_1, operand_2))
    if operator == '3':
        if operand_3:
            print_result(multiply_three(operand_1, operand_2, operand_3))
        else:
            print_result(multiply(operand_1, operand_2))
    if operator == '4':
        if operand_3:
            print_result(divide_three(operand_1, operand_2, operand_3))
        else:
            print_result(divide(operand_1, operand_2))
    if operator == '5':
        if operand_3:
            print "Sorry. I don't know what you mean with three operands and an exponent"
        else:
            print_result(exponent(operand_1, operand_2))

    # wait for the user to press enter to quit
    raw_input('\nPress enter to quit...')

    # clear the console screen
    os.system('clear')


# this makes it so that when you run your file that it will call the main
# function. It is always good practice to do this. We put all of the runtime
# functionality in the main function
if __name__ == '__main__':
    main()
25.658537
83
0.703739
985d75c26a39d5f8ad8e172571965dcd86a6008b
1,025
py
Python
backend/tests/libs/lib_cinq_auditor_aws_required_tags.py
MrSecure/cloud-inquisitor
d81ee2d348e09361cb5f959f28b11e630a28989c
[ "Apache-2.0" ]
null
null
null
backend/tests/libs/lib_cinq_auditor_aws_required_tags.py
MrSecure/cloud-inquisitor
d81ee2d348e09361cb5f959f28b11e630a28989c
[ "Apache-2.0" ]
1
2018-10-03T09:44:38.000Z
2018-10-03T09:44:38.000Z
backend/tests/libs/lib_cinq_auditor_aws_required_tags.py
MrSecure/cloud-inquisitor
d81ee2d348e09361cb5f959f28b11e630a28989c
[ "Apache-2.0" ]
null
null
null
from io import StringIO

from cloud_inquisitor.config import dbconfig, DBCJSON
from cloud_inquisitor.constants import NS_AUDITOR_REQUIRED_TAGS

VALID_TAGSET = [
    {'Key': x, 'Value': 'value@example.com'} for x in dbconfig.get('required_tags', NS_AUDITOR_REQUIRED_TAGS, [])
]
IGNORE_TAGSET = [{'Key': dbconfig.get('audit_ignore_tag', NS_AUDITOR_REQUIRED_TAGS), 'Value': 'IGNORED'}]


def s3_upload_file_from_string(client, bucket_name, file_name, content):
    file_obj = StringIO()
    file_obj.write(content)
    client.upload_fileobj(file_obj, bucket_name, file_name)


def set_audit_scope(*args):
    db_setting = dbconfig.get('audit_scope', NS_AUDITOR_REQUIRED_TAGS)
    db_setting['enabled'] = args
    dbconfig.set(NS_AUDITOR_REQUIRED_TAGS, 'audit_scope', DBCJSON(db_setting))


def prep_s3_testing(cinq_test_service, collect_only=False):
    set_audit_scope('aws_s3_bucket')
    dbconfig.set(NS_AUDITOR_REQUIRED_TAGS, 'collect_only', collect_only)
    cinq_test_service.start_mocking_services('cloudwatch', 's3')
33.064516
113
0.773659
aa24e7ee0cfb9805933671312c6e99c6621d9d1e
973
py
Python
base/migrations/0001_initial.py
SergioVzqz/Devmorize
e4e5ddaf6b8d0f66873e345e986661daae504cf7
[ "MIT" ]
null
null
null
base/migrations/0001_initial.py
SergioVzqz/Devmorize
e4e5ddaf6b8d0f66873e345e986661daae504cf7
[ "MIT" ]
null
null
null
base/migrations/0001_initial.py
SergioVzqz/Devmorize
e4e5ddaf6b8d0f66873e345e986661daae504cf7
[ "MIT" ]
null
null
null
# Generated by Django 3.2.8 on 2021-12-07 04:30

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                ('complete', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
33.551724
141
0.63001
c794ccedb95c166e23ff048e439595b73354d6cc
12,590
py
Python
tests/tensorflow_cloud/containerize_test.py
lgeiger/cloud
99890b34d1a78a358d7958fa596fbc5633db5484
[ "Apache-2.0" ]
null
null
null
tests/tensorflow_cloud/containerize_test.py
lgeiger/cloud
99890b34d1a78a358d7958fa596fbc5633db5484
[ "Apache-2.0" ]
null
null
null
tests/tensorflow_cloud/containerize_test.py
lgeiger/cloud
99890b34d1a78a358d7958fa596fbc5633db5484
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the cloud docker containerization module.""" import os import tarfile import unittest from tensorflow_cloud import containerize from tensorflow_cloud import machine_config from mock import call, patch try: from tensorflow.python.framework.versions import VERSION except ImportError: # Use the latest TF docker image if a local installation is not available. VERSION = "latest" class TestContainerize(unittest.TestCase): def setup(self): self.entry_point = "tests/testdata/mnist_example_using_fit.py" self.chief_config = machine_config.COMMON_MACHINE_CONFIGS["K80_1X"] self.worker_config = machine_config.COMMON_MACHINE_CONFIGS["K80_1X"] self.entry_point_dir, _ = os.path.split(self.entry_point) self.mock_registry = "gcr.io/my-project" self.project_id = "my-project" def cleanup(self, docker_file): os.remove(docker_file) def assert_docker_file(self, expected_lines, docker_file): with open(docker_file, "r") as f: actual_lines = f.readlines() self.assertListEqual(expected_lines, actual_lines) def test_create_docker_file_defaults(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, ) lcb._create_docker_file() expected_docker_file_lines = [ "FROM tensorflow/tensorflow:{}-gpu\n".format(VERSION), "WORKDIR /app/\n", "COPY /app/ /app/\n", 'ENTRYPOINT ["python", "mnist_example_using_fit.py"]', ] self.assert_docker_file(expected_docker_file_lines, lcb.docker_file_path) self.cleanup(lcb.docker_file_path) def test_create_docker_with_requirements(self): self.setup() req_file = "requirements.txt" with open(req_file, "w") as f: f.writelines(["tensorflow-datasets"]) lcb = containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, requirements_txt=req_file, ) lcb._create_docker_file() expected_docker_file_lines = [ "FROM tensorflow/tensorflow:{}-gpu\n".format(VERSION), "WORKDIR /app/\n", "COPY /app/ /app/\n", "RUN if [ -e requirements.txt ]; " "then pip install --no-cache -r requirements.txt; fi\n", 'ENTRYPOINT ["python", "mnist_example_using_fit.py"]', ] self.assert_docker_file(expected_docker_file_lines, lcb.docker_file_path) os.remove(req_file) self.cleanup(lcb.docker_file_path) def test_create_docker_file_with_destination_dir(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, destination_dir="/my_app/temp/", ) lcb._create_docker_file() expected_docker_file_lines = [ "FROM tensorflow/tensorflow:{}-gpu\n".format(VERSION), "WORKDIR /my_app/temp/\n", "COPY /my_app/temp/ /my_app/temp/\n", 'ENTRYPOINT ["python", "mnist_example_using_fit.py"]', ] self.assert_docker_file(expected_docker_file_lines, lcb.docker_file_path) self.cleanup(lcb.docker_file_path) def test_create_docker_file_with_docker_base_image(self): self.setup() lcb = 
containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, docker_base_image="tensorflow/tensorflow:latest", ) lcb._create_docker_file() expected_docker_file_lines = [ "FROM tensorflow/tensorflow:latest\n", "WORKDIR /app/\n", "COPY /app/ /app/\n", 'ENTRYPOINT ["python", "mnist_example_using_fit.py"]', ] self.assert_docker_file(expected_docker_file_lines, lcb.docker_file_path) self.cleanup(lcb.docker_file_path) def test_create_docker_file_with_cpu_config(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, None, machine_config.COMMON_MACHINE_CONFIGS["CPU"], self.worker_config, self.mock_registry, self.project_id, ) lcb._create_docker_file() expected_docker_file_lines = [ "FROM tensorflow/tensorflow:{}\n".format(VERSION), "WORKDIR /app/\n", "COPY /app/ /app/\n", 'ENTRYPOINT ["python", "mnist_example_using_fit.py"]', ] self.assert_docker_file(expected_docker_file_lines, lcb.docker_file_path) self.cleanup(lcb.docker_file_path) def test_create_docker_file_with_tpu_config(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, None, machine_config.COMMON_MACHINE_CONFIGS["CPU"], machine_config.COMMON_MACHINE_CONFIGS["TPU"], self.mock_registry, self.project_id, ) lcb._create_docker_file() expected_docker_file_lines = [ "FROM tensorflow/tensorflow:{}\n".format(VERSION), "WORKDIR /app/\n", "COPY /app/ /app/\n", "RUN pip install cloud-tpu-client\n", 'ENTRYPOINT ["python", "mnist_example_using_fit.py"]', ] self.assert_docker_file(expected_docker_file_lines, lcb.docker_file_path) self.cleanup(lcb.docker_file_path) def test_get_file_path_map_defaults(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, ) lcb._create_docker_file() file_map = lcb._get_file_path_map() self.assertDictEqual( file_map, {lcb.docker_file_path: "Dockerfile", self.entry_point_dir: "/app/"}, ) self.cleanup(lcb.docker_file_path) def test_get_file_path_map_with_requirements(self): self.setup() req_file = "requirements.txt" with open(req_file, "w") as f: f.writelines(["tensorflow-datasets"]) lcb = containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, requirements_txt=req_file, ) lcb._create_docker_file() file_map = lcb._get_file_path_map() self.assertDictEqual( file_map, { lcb.docker_file_path: "Dockerfile", req_file: "/app/requirements.txt", self.entry_point_dir: "/app/", }, ) os.remove(req_file) self.cleanup(lcb.docker_file_path) def test_get_file_path_map_with_destination_dir(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, None, self.chief_config, self.worker_config, self.mock_registry, self.project_id, destination_dir="/my_app/temp/", ) lcb._create_docker_file() file_map = lcb._get_file_path_map() self.assertDictEqual( file_map, {lcb.docker_file_path: "Dockerfile", self.entry_point_dir: "/my_app/temp/"}, ) self.cleanup(lcb.docker_file_path) def test_get_file_path_map_with_wrapped_entry_point(self): self.setup() lcb = containerize.LocalContainerBuilder( self.entry_point, self.entry_point, self.chief_config, self.worker_config, self.mock_registry, self.project_id, destination_dir="/my_app/temp/", ) lcb._create_docker_file() file_map = lcb._get_file_path_map() self.assertDictEqual( file_map, { lcb.docker_file_path: "Dockerfile", self.entry_point_dir: "/my_app/temp/", self.entry_point: 
"/my_app/temp/mnist_example_using_fit.py", }, ) self.cleanup(lcb.docker_file_path) def test_get_tar_file_path(self): self.setup() req_file = "requirements.txt" with open(req_file, "w") as f: f.writelines(["tensorflow-datasets"]) lcb = containerize.LocalContainerBuilder( self.entry_point, self.entry_point, self.chief_config, self.worker_config, self.mock_registry, self.project_id, requirements_txt=req_file, ) lcb._get_tar_file_path() assert tarfile.is_tarfile(lcb.tar_file_path) tar_file = tarfile.open(lcb.tar_file_path) tar_file_names = [m.name for m in tar_file.getmembers()] self.assertIn("app/mnist_example_using_fit.py", tar_file_names) self.assertIn("app/requirements.txt", tar_file_names) self.assertIn("Dockerfile", tar_file_names) os.remove(req_file) self.cleanup(lcb.docker_file_path) @patch("tensorflow_cloud.containerize.logger") @patch("tensorflow_cloud.containerize.APIClient") def test_get_docker_image(self, MockAPIClient, MockLogger): self.setup() mock_registry = "gcr.io/my-project" mock_img_tag = mock_registry + "/tensorflow-train:abcde" # Verify mocking is correct and mock img tag. assert MockAPIClient is containerize.APIClient assert MockLogger is containerize.logger docker_client = MockAPIClient.return_value lcb = containerize.LocalContainerBuilder( self.entry_point, self.entry_point, self.chief_config, self.worker_config, self.mock_registry, self.project_id, destination_dir="/my_app/temp/", ) def _mock_generate_name(): return mock_img_tag lcb._generate_name = _mock_generate_name img_tag = lcb.get_docker_image() self.assertEqual(img_tag, mock_img_tag) # Verify docker APIClient is invoked as expected. self.assertEqual(MockAPIClient.call_count, 1) _, kwargs = MockAPIClient.call_args self.assertDictEqual(kwargs, {"version": "auto"}) # Verify APIClient().build is invoked as expected. self.assertEqual(docker_client.build.call_count, 1) _, kwargs = docker_client.build.call_args expected = { "path": ".", "custom_context": True, "encoding": "utf-8", "tag": img_tag, } self.assertTrue(set(expected.items()).issubset(set(kwargs.items()))) # Verify APIClient().push is invoked as expected. self.assertEqual(docker_client.push.call_count, 1) args, kwargs = docker_client.push.call_args self.assertListEqual(list(args), [img_tag]) self.assertDictEqual(kwargs, {"stream": True}) # Verify logger info calls. self.assertEqual(MockLogger.info.call_count, 2) MockLogger.info.assert_has_calls( [ call(r"Building docker image: " + img_tag), call(r"Publishing docker image: " + img_tag), ] ) self.cleanup(lcb.docker_file_path)
33.663102
88
0.614376
14ef003d15190797ddc5315393b41267e7d36658
250
py
Python
test_iperf.py
Xiaoyu-Xing/CS3640Network-Applications
23a9d451ae91f64b53a28f5641d477ac30367808
[ "MIT" ]
null
null
null
test_iperf.py
Xiaoyu-Xing/CS3640Network-Applications
23a9d451ae91f64b53a28f5641d477ac30367808
[ "MIT" ]
null
null
null
test_iperf.py
Xiaoyu-Xing/CS3640Network-Applications
23a9d451ae91f64b53a28f5641d477ac30367808
[ "MIT" ]
null
null
null
def test_iperf(em_net, traffic_properties):
    source = traffic_properties.get('source')
    destination = traffic_properties.get('destination')
    return Mininet.iperf(em_net, hosts=[source,destination], seconds=traffic_properties.get('time'))
50
99
0.764
ac2b904d17c8593c5a54b54c86cdf8909bdd59a3
7,397
py
Python
bench/benchmark_loads.py
jaemk/orjson
129a821533ca8a33eaba64ec0aaeac2ee0c202fb
[ "Apache-2.0", "MIT" ]
null
null
null
bench/benchmark_loads.py
jaemk/orjson
129a821533ca8a33eaba64ec0aaeac2ee0c202fb
[ "Apache-2.0", "MIT" ]
null
null
null
bench/benchmark_loads.py
jaemk/orjson
129a821533ca8a33eaba64ec0aaeac2ee0c202fb
[ "Apache-2.0", "MIT" ]
null
null
null
# SPDX-License-Identifier: (Apache-2.0 OR MIT) from json import dumps as json_dumps from json import loads as json_loads from orjson import dumps as orjson_dumps from orjson import loads as orjson_loads from rapidjson import dumps as rapidjson_dumps from rapidjson import loads as rapidjson_loads from simplejson import dumps as simplejson_dumps from simplejson import loads as simplejson_loads from ujson import dumps as ujson_dumps from ujson import loads as ujson_loads from .util import read_fixture_str def test_loads_canada_orjson(benchmark): benchmark.group = "canada.json deserialization" benchmark.extra_info["lib"] = "orjson" data = read_fixture_str("canada.json.xz") benchmark.extra_info["correct"] = json_loads( orjson_dumps(orjson_loads(data)) ) == json_loads(data) benchmark(orjson_loads, data) def test_loads_canada_ujson(benchmark): benchmark.group = "canada.json deserialization" benchmark.extra_info["lib"] = "ujson" data = read_fixture_str("canada.json.xz") benchmark.extra_info["correct"] = json_loads( ujson_dumps(ujson_loads(data)) ) == json_loads(data) benchmark(ujson_loads, data) def test_loads_canada_json(benchmark): benchmark.group = "canada.json deserialization" benchmark.extra_info["lib"] = "json" data = read_fixture_str("canada.json.xz") benchmark.extra_info["correct"] = json_loads( json_dumps(json_loads(data)) ) == json_loads(data) benchmark(json_loads, data) def test_loads_canada_rapidjson(benchmark): benchmark.group = "canada.json deserialization" benchmark.extra_info["lib"] = "rapidjson" data = read_fixture_str("canada.json.xz") benchmark.extra_info["correct"] = json_loads( rapidjson_dumps(rapidjson_loads(data)) ) == json_loads(data) benchmark(rapidjson_loads, data) def test_loads_canada_simplejson(benchmark): benchmark.group = "canada.json deserialization" benchmark.extra_info["lib"] = "simplejson" data = read_fixture_str("canada.json.xz") benchmark.extra_info["correct"] = json_loads( simplejson_dumps(simplejson_loads(data)) ) == json_loads(data) benchmark(simplejson_loads, data) def test_loads_citm_catalog_orjson(benchmark): benchmark.group = "citm_catalog.json deserialization" benchmark.extra_info["lib"] = "orjson" data = read_fixture_str("citm_catalog.json.xz") benchmark.extra_info["correct"] = json_loads( orjson_dumps(orjson_loads(data)) ) == json_loads(data) benchmark(orjson_loads, data) def test_loads_citm_catalog_ujson(benchmark): benchmark.group = "citm_catalog.json deserialization" benchmark.extra_info["lib"] = "ujson" data = read_fixture_str("citm_catalog.json.xz") benchmark.extra_info["correct"] = json_loads( ujson_dumps(ujson_loads(data)) ) == json_loads(data) benchmark(ujson_loads, data) def test_loads_citm_catalog_json(benchmark): benchmark.group = "citm_catalog.json deserialization" benchmark.extra_info["lib"] = "json" data = read_fixture_str("citm_catalog.json.xz") benchmark.extra_info["correct"] = json_loads( json_dumps(json_loads(data)) ) == json_loads(data) benchmark(json_loads, data) def test_loads_citm_catalog_rapidjson(benchmark): benchmark.group = "citm_catalog.json deserialization" benchmark.extra_info["lib"] = "rapidjson" data = read_fixture_str("citm_catalog.json.xz") benchmark.extra_info["correct"] = json_loads( rapidjson_dumps(rapidjson_loads(data)) ) == json_loads(data) benchmark(rapidjson_loads, data) def test_loads_citm_catalog_simplejson(benchmark): benchmark.group = "citm_catalog.json deserialization" benchmark.extra_info["lib"] = "simplejson" data = read_fixture_str("citm_catalog.json.xz") benchmark.extra_info["correct"] = json_loads( 
simplejson_dumps(simplejson_loads(data)) ) == json_loads(data) benchmark(simplejson_loads, data) def test_loads_github_orjson(benchmark): benchmark.group = "github.json deserialization" benchmark.extra_info["lib"] = "orjson" data = read_fixture_str("github.json.xz") benchmark.extra_info["correct"] = json_loads( orjson_dumps(orjson_loads(data)) ) == json_loads(data) benchmark(orjson_loads, data) def test_loads_github_ujson(benchmark): benchmark.group = "github.json deserialization" benchmark.extra_info["lib"] = "ujson" data = read_fixture_str("github.json.xz") benchmark.extra_info["correct"] = json_loads( ujson_dumps(ujson_loads(data)) ) == json_loads(data) benchmark(ujson_loads, data) def test_loads_github_json(benchmark): benchmark.group = "github.json deserialization" benchmark.extra_info["lib"] = "json" data = read_fixture_str("github.json.xz") benchmark.extra_info["correct"] = json_loads( json_dumps(json_loads(data)) ) == json_loads(data) benchmark(json_loads, data) def test_loads_github_rapidjson(benchmark): benchmark.group = "github.json deserialization" benchmark.extra_info["lib"] = "rapidjson" data = read_fixture_str("github.json.xz") benchmark.extra_info["correct"] = json_loads( rapidjson_dumps(rapidjson_loads(data)) ) == json_loads(data) benchmark(rapidjson_loads, data) def test_loads_github_simplejson(benchmark): benchmark.group = "github.json deserialization" benchmark.extra_info["lib"] = "simplejson" data = read_fixture_str("github.json.xz") benchmark.extra_info["correct"] = json_loads( simplejson_dumps(simplejson_loads(data)) ) == json_loads(data) benchmark(simplejson_loads, data) def test_loads_twitter_orjson(benchmark): benchmark.group = "twitter.json deserialization" benchmark.extra_info["lib"] = "orjson" data = read_fixture_str("twitter.json.xz") benchmark.extra_info["correct"] = json_loads( orjson_dumps(orjson_loads(data)) ) == json_loads(data) benchmark(orjson_loads, data) def test_loads_twitter_ujson(benchmark): benchmark.group = "twitter.json deserialization" benchmark.extra_info["lib"] = "ujson" data = read_fixture_str("twitter.json.xz") benchmark.extra_info["correct"] = json_loads( ujson_dumps(ujson_loads(data)) ) == json_loads(data) benchmark(ujson_loads, data) def test_loads_twitter_json(benchmark): benchmark.group = "twitter.json deserialization" benchmark.extra_info["lib"] = "json" data = read_fixture_str("twitter.json.xz") benchmark.extra_info["correct"] = json_loads( json_dumps(json_loads(data)) ) == json_loads(data) benchmark(json_loads, data) def test_loads_twitter_rapidjson(benchmark): benchmark.group = "twitter.json deserialization" benchmark.extra_info["lib"] = "rapidjson" data = read_fixture_str("twitter.json.xz") benchmark.extra_info["correct"] = json_loads( rapidjson_dumps(rapidjson_loads(data)) ) == json_loads(data) benchmark(rapidjson_loads, data) def test_loads_twitter_simplejson(benchmark): benchmark.group = "twitter.json deserialization" benchmark.extra_info["lib"] = "simplejson" data = read_fixture_str("twitter.json.xz") benchmark.extra_info["correct"] = json_loads( simplejson_dumps(simplejson_loads(data)) ) == json_loads(data) benchmark(simplejson_loads, data)
34.087558
57
0.726646
0a4db237dc32b5d091694e059164577d8adba25b
2,060
py
Python
src/ml_models/model_make.py
kackey0-1/project_ai
8b309eb1e0cbc46e4a9b134adcd0b44e5657f709
[ "MIT" ]
1
2022-02-24T16:24:56.000Z
2022-02-24T16:24:56.000Z
src/ml_models/model_make.py
kackey0-1/project_ai
8b309eb1e0cbc46e4a9b134adcd0b44e5657f709
[ "MIT" ]
2
2021-12-27T15:12:09.000Z
2022-02-06T14:14:36.000Z
src/ml_models/model_make.py
kackey0-1/project_ai
8b309eb1e0cbc46e4a9b134adcd0b44e5657f709
[ "MIT" ]
null
null
null
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
# from keras.utils.vis_utils import plot_model
import numpy as np
import matplotlib.pyplot as plt

# Saving the model
# import os
# from google.colab import files

# Load the data
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# This run originally used only 300 training and 100 test samples out of the full dataset
# Conv layers expect a 4-dimensional array (batch size x height x width x channels)
# MNIST data is not RGB and is originally 3-dimensional, so reshape it to 4 dimensions first
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Define the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(28, 28, 1)))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation("softmax"))

model.compile(
    loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"]
)

model.fit(
    X_train,
    y_train,
    batch_size=128,
    epochs=50,
    verbose=1,
    validation_data=(X_test, y_test),
)

# Evaluate accuracy
scores = model.evaluate(X_test, y_test, verbose=1)
print("Test loss:", scores[0])
print("Test accuracy:", scores[1])

# Visualize the data (first 10 images of the test data)
# for i in range(10):
#     plt.subplot(2, 5, i + 1)
#     plt.imshow(X_test[i].reshape((28, 28)), "gray")
# plt.suptitle("10 images of test data", fontsize=20)
# plt.show()

# Predict (first 10 images of the test data)
prediction = np.argmax(model.predict(X_test[0:10]), axis=1)
print(prediction)

model.summary()

# Create the results directory
# result_dir = 'results'
# if not os.path.exists(result_dir):
#     os.mkdir(result_dir)
# Save the weights
# model.save(os.path.join(result_dir, 'model.h5'))
model.save("/model.h5")
# files.download('/content/results/model.h5')
25.432099
79
0.730583
61dcb688d7dead2ac848f4db3382b961b8947a7e
1,084
py
Python
sdks/python/test/test_v2MissingSymbolCrashGroupsResponse.py
Brantone/appcenter-sdks
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
[ "MIT" ]
null
null
null
sdks/python/test/test_v2MissingSymbolCrashGroupsResponse.py
Brantone/appcenter-sdks
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
[ "MIT" ]
6
2019-10-23T06:38:53.000Z
2022-01-22T07:57:58.000Z
sdks/python/test/test_v2MissingSymbolCrashGroupsResponse.py
Brantone/appcenter-sdks
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
[ "MIT" ]
2
2019-10-23T06:31:05.000Z
2021-08-21T17:32:47.000Z
# coding: utf-8

"""
    App Center Client

    Microsoft Visual Studio App Center API  # noqa: E501

    OpenAPI spec version: preview
    Contact: benedetto.abbenanti@gmail.com
    Project Repository: https://github.com/b3nab/appcenter-sdks
"""

from __future__ import absolute_import

import unittest

import appcenter_sdk
from v2MissingSymbolCrashGroupsResponse.clsv2MissingSymbolCrashGroupsResponse import v2MissingSymbolCrashGroupsResponse  # noqa: E501
from appcenter_sdk.rest import ApiException


class Testv2MissingSymbolCrashGroupsResponse(unittest.TestCase):
    """v2MissingSymbolCrashGroupsResponse unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testv2MissingSymbolCrashGroupsResponse(self):
        """Test v2MissingSymbolCrashGroupsResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = appcenter_sdk.models.clsv2MissingSymbolCrashGroupsResponse.v2MissingSymbolCrashGroupsResponse()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
27.1
133
0.760148
4dc6f19452f1928a12cb21256eb1100495d990ef
2,539
py
Python
session11.py
sahanashetty31/session_11_epai3_assignment
4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647
[ "MIT" ]
null
null
null
session11.py
sahanashetty31/session_11_epai3_assignment
4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647
[ "MIT" ]
null
null
null
session11.py
sahanashetty31/session_11_epai3_assignment
4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647
[ "MIT" ]
null
null
null
import math
from functools import lru_cache


class Polygon:
    def __init__(self, n, R):
        if n < 3:
            raise ValueError('Polygon must have at least 3 vertices.')
        self._n = n
        self._R = R

    def __repr__(self):
        return f'Polygon(n={self._n}, R={self._R})'

    @property
    def count_vertices(self):
        return self._n

    @property
    def count_edges(self):
        return self._n

    @property
    def circumradius(self):
        return self._R

    @property
    def interior_angle(self):
        return (self._n - 2) * 180 / self._n

    @property
    def side_length(self):
        return 2 * self._R * math.sin(math.pi / self._n)

    @property
    def apothem(self):
        return self._R * math.cos(math.pi / self._n)

    @property
    def area(self):
        return self._n / 2 * self.side_length * self.apothem

    @property
    def perimeter(self):
        return self._n * self.side_length

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.count_edges == other.count_edges
                    and self.circumradius == other.circumradius)
        else:
            return NotImplemented

    def __gt__(self, other):
        if isinstance(other, self.__class__):
            return self.count_vertices > other.count_vertices
        else:
            return NotImplemented


class Polygons:
    def __init__(self, m, R):
        if m < 3:
            raise ValueError('m must be at least 3')
        self._m = m
        self._R = R
        self._polygons = [Polygon(i, R) for i in range(3, m + 1)]

    def __len__(self):
        return self._m - 2

    def __repr__(self):
        return f'Polygons(m={self._m}, R={self._R})'

    def __getitem__(self, s):
        return self._polygons[s]

    def __iter__(self):
        # Name fixed to match the nested iterator class defined below.
        return self.PolyIterator(self)

    @property
    def max_efficiency_polygon(self):
        sorted_polygons = sorted(self._polygons,
                                 key=lambda p: p.area / p.perimeter,
                                 reverse=True)
        return sorted_polygons[0]

    class PolyIterator:
        def __init__(self, poly_obj):
            self._poly_obj = poly_obj
            self._index = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self._index >= len(self._poly_obj):
                raise StopIteration
            else:
                item = self._poly_obj._polygons[self._index]
                self._index += 1
                return item
25.39
102
0.568334
fe65a76f43abc8e714edda32827fb4a6cfa2e1f3
1,077
py
Python
test/test_app.py
juaneml/IV_1819_Proyecto
33c5b10bee15c937c29db63752f604d546232d17
[ "MIT" ]
1
2019-01-22T11:43:46.000Z
2019-01-22T11:43:46.000Z
test/test_app.py
juaneml/IV_1819_Proyecto
33c5b10bee15c937c29db63752f604d546232d17
[ "MIT" ]
34
2018-10-01T07:06:01.000Z
2019-02-13T09:27:23.000Z
test/test_app.py
juaneml/IV_1819_Proyecto
33c5b10bee15c937c29db63752f604d546232d17
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import unittest, json, requests
import sys
from requests import *

sys.path.append('../src/')

url = 'https://proyecto-iv.herokuapp.com/'


class testProyecto(unittest.TestCase):

    def test_url_raiz(self):
        response = requests.get(url)
        self.assertEqual(response.json()['status'], 'OK', "Aplicación con status OK")

    def test_url_status(self):
        response = requests.get(url + '/status')
        self.assertEqual(response.json()["noticias"]["ruta"], "/noticias", "Correcto")

    def test_url_noticias(self):
        response = requests.get(url + '/noticias')
        self.assertEqual(response.json()['Noticia']['Titulo'], "Granada, una ciudad con encanto", "titulo correcto")

    def test_post_noticias(self):
        response = requests.post(url + '/method_post')
        assert(response.status_code == 200)

    def test_put_noticias(self):
        response = requests.put(url + 'method_put?noticia=una noticia&anterior=es anterior&nuevo=es nuevo')
        assert(response.status_code == 200)


if __name__ == '__main__':
    unittest.main()
32.636364
114
0.675023
5f7341d1af34c450f10f2c9a9df5425e8e006e37
870
py
Python
algorithms/different-ways-to-add-parentheses.py
Chronoviser/leetcode-1
65ee0504d64c345f822f216fef6e54dd62b8f858
[ "MIT" ]
41
2018-07-03T07:35:30.000Z
2021-09-25T09:33:43.000Z
algorithms/different-ways-to-add-parentheses.py
Chronoviser/leetcode-1
65ee0504d64c345f822f216fef6e54dd62b8f858
[ "MIT" ]
2
2018-07-23T10:50:11.000Z
2020-10-06T07:34:29.000Z
algorithms/different-ways-to-add-parentheses.py
Chronoviser/leetcode-1
65ee0504d64c345f822f216fef6e54dd62b8f858
[ "MIT" ]
7
2018-07-06T13:43:18.000Z
2020-10-06T02:29:57.000Z
class Solution:
    def diffWaysToCompute(self, input):
        """
        :type input: str
        :rtype: List[int]
        """
        result = []
        for i in range(len(input)):
            if input[i] == '+' or input[i] == '-' or input[i] == '*':
                left = self.diffWaysToCompute(input[:i])
                right = self.diffWaysToCompute(input[i+1:])
                for j in range(len(left)):
                    for k in range(len(right)):
                        if input[i] == '+':
                            result.append(left[j] + right[k])
                        elif input[i] == '-':
                            result.append(left[j] - right[k])
                        else:
                            result.append(left[j] * right[k])
        if len(result) == 0:
            result.append(eval(input))
        return sorted(result)
36.25
69
0.411494
da1a65dc1a9188bb179d5bb58e03dfb2228b8378
1,842
py
Python
tests/integration/test_warcraft_client_creature.py
tehmufifnman/BattleMuffin-Python
f0bb5ee7024624191b33441aeecf3fb29570abe7
[ "MIT" ]
7
2020-05-15T18:09:23.000Z
2021-03-08T16:10:37.000Z
tests/integration/test_warcraft_client_creature.py
tehmufifnman/BattleMuffin-Python
f0bb5ee7024624191b33441aeecf3fb29570abe7
[ "MIT" ]
2
2020-04-20T04:42:37.000Z
2020-10-28T23:27:07.000Z
tests/integration/test_warcraft_client_creature.py
tehmufifnman/BattleMuffin-Python
f0bb5ee7024624191b33441aeecf3fb29570abe7
[ "MIT" ]
2
2020-05-18T06:58:53.000Z
2021-03-08T16:10:27.000Z
import os

from battlemuffin.clients.warcraft_client import WarcraftClient
from battlemuffin.config.region_config import Locale, Region


def test_get_creature_families_index(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature_families_index()
    assert response == snapshot


def test_get_creature_family(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature_family(1)
    assert response == snapshot


def test_get_creature_types_index(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature_types_index()
    assert response == snapshot


def test_get_creature_type(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature_type(1)
    assert response == snapshot


def test_get_creature(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature(42722)
    assert response == snapshot


def test_get_creature_display_media(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature_display_media(30221)
    assert response == snapshot


def test_get_creature_family_media(snapshot):
    client = WarcraftClient(
        os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US
    )
    response = client.get_creature_family_media(1)
    assert response == snapshot
30.196721
83
0.724756
a5e7418dbebb0cb450ddf91940fecb4f48028cd1
1,149
py
Python
unit_tests/test_reports.py
teatauri/Collab-2-Panopto
84972d3c5b32e741149dd857b284ca02cdc32e59
[ "MIT" ]
2
2021-07-27T12:08:33.000Z
2021-12-18T23:00:51.000Z
unit_tests/test_reports.py
teatauri/Collab-2-Panopto
84972d3c5b32e741149dd857b284ca02cdc32e59
[ "MIT" ]
2
2021-07-29T11:22:58.000Z
2021-07-29T11:25:12.000Z
unit_tests/test_reports.py
teatauri/Collab-2-Panopto
84972d3c5b32e741149dd857b284ca02cdc32e59
[ "MIT" ]
1
2022-01-18T12:47:48.000Z
2022-01-18T12:47:48.000Z
import pytest
import os

from src.views import Reports

BASE = os.getcwd() + "/unit_tests/unit_test_data"

recording_data = {
    "recording_id": "19ceeea8cb984bd79a6d...967af9db12",
    "recording_name": "testvid 2 - public_2",
    "duration": 119000,
    "storage_size": 13837851,
    "created": "2021-05-19T17:11:45.331Z",
}

uuid = "f61d43526fe343039f0a8334d1dd0af2"

# NOTE tests not implemented because currently unsure if reporting will be kept
# TODO possibly implement tests for logging processes

######################
### POSITIVE TESTS ###
######################


def test_report_entry_returns_correct_data():
    pass


def test_report_403_entry_returns_correct_data():
    pass


def test_append_report_entry_appends_correct_data():
    pass


def test_append_report_403_entry_appends_correct_data():
    pass


def test_generate_reports_generates_correct_report():
    pass


def test_create_collab_download_report():
    pass


def test_create_collab_403_download_report():
    pass


######################
### NEGATIVE TESTS ###
######################


########################
### HELPER FUNCTIONS ###
########################
18.532258
79
0.657963
5a59ec924f47a85dc833205098c1b79ee180ad16
6,762
py
Python
train.py
psethwick/Fast-Neural-Style-Transfer
414b0a2ab08e01f9238671589a7794efad68a44d
[ "MIT" ]
null
null
null
train.py
psethwick/Fast-Neural-Style-Transfer
414b0a2ab08e01f9238671589a7794efad68a44d
[ "MIT" ]
null
null
null
train.py
psethwick/Fast-Neural-Style-Transfer
414b0a2ab08e01f9238671589a7794efad68a44d
[ "MIT" ]
null
null
null
import argparse import os import sys import random from PIL import Image import numpy as np import torch import glob from torch.optim import Adam from torch.utils.data import DataLoader from torchvision import datasets from torchvision.utils import save_image from models import TransformerNet, VGG16 from utils import * #from matplotlib import pyplot as plt from IPython.display import Image as DisplayImage class ImageFolderWithPaths(datasets.ImageFolder): """Custom dataset that includes image file paths. Extends torchvision.datasets.ImageFolder """ # override the __getitem__ method. this is the method that dataloader calls def __getitem__(self, index): # this is what ImageFolder normally returns original_tuple = super(ImageFolderWithPaths, self).__getitem__(index) # the image file path path = self.imgs[index][0] # make a new tuple that includes original and the path tuple_with_path = (original_tuple + (path,)) return tuple_with_path def train(dataset_path, style_image="style-images/mosaic.jpg", epochs=1, batch_size=4, image_size=256, style_size=256, \ lambda_content=1e5, lambda_style=1e10, lr=1e-3, checkpoint_model=None, checkpoint_interval=2000, sample_interval=1000): # parser = argparse.ArgumentParser(description="Parser for Fast-Neural-Style") # parser.add_argument("--dataset_path", type=str, required=True, help="path to training dataset") # parser.add_argument("--style_image", type=str, default="style-images/mosaic.jpg", help="path to style image") # parser.add_argument("--epochs", type=int, default=1, help="Number of training epochs") # parser.add_argument("--batch_size", type=int, default=4, help="Batch size for training") # parser.add_argument("--image_size", type=int, default=256, help="Size of training images") # parser.add_argument("--style_size", type=int, help="Size of style image") # parser.add_argument("--lambda_content", type=float, default=1e5, help="Weight for content loss") # parser.add_argument("--lambda_style", type=float, default=1e10, help="Weight for style loss") # parser.add_argument("--lr", type=float, default=1e-3, help="Learning rate") # parser.add_argument("--checkpoint_model", type=str, help="Optional path to checkpoint model") # parser.add_argument("--checkpoint_interval", type=int, default=2000, help="Batches between saving model") # parser.add_argument("--sample_interval", type=int, default=1000, help="Batches between saving image samples") # = parser.parse_) style_name = style_image.split("/")[-1].split(".")[0] os.makedirs("images/outputs/"+style_name+"-training", exist_ok=True) os.makedirs("checkpoints", exist_ok=True) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Create dataloader for the training data train_dataset = ImageFolderWithPaths(dataset_path, train_transform(image_size)) dataloader = DataLoader(train_dataset, batch_size=batch_size) # Defines networks transformer = TransformerNet().to(device) vgg = VGG16(requires_grad=False).to(device) # Load checkpoint model if specified if checkpoint_model: transformer.load_state_dict(torch.load(checkpoint_model)) # Define optimizer and loss optimizer = Adam(transformer.parameters(), lr) l2_loss = torch.nn.MSELoss().to(device) # Load style image style = style_transform(style_size)(Image.open(style_image)) style = style.repeat(batch_size, 1, 1, 1).to(device) # Extract style features features_style = vgg(style) gram_style = [gram_matrix(y) for y in features_style] # Sample 8 images for visual evaluation of the model image_samples = [] for path in 
random.sample(glob.glob(dataset_path+"/*/*.jpg"), 8): image_samples += [style_transform(image_size)(Image.open(path))] image_samples = torch.stack(image_samples) def save_sample(batches_done): """ Evaluates the model and saves image samples """ transformer.eval() with torch.no_grad(): output = transformer(image_samples.to(device)) image_grid = denormalize(torch.cat((image_samples.cpu(), output.cpu()), 2)) image_path = "images/outputs/"+style_name+"-training/"+str(batches_done)+".jpg" save_image(image_grid, image_path, nrow=4) #plt.show(image_grid) DisplayImage(image_path) transformer.train() for epoch in range(epochs): epoch_metrics = {"content": [], "style": [], "total": []} for batch_i, (images, _, paths) in enumerate(dataloader): optimizer.zero_grad() images_original = images.to(device) images_transformed = transformer(images_original) # sys.stdout.write("\n".join(paths)) # Extract features features_original = vgg(images_original) features_transformed = vgg(images_transformed) # Compute content loss as MSE between features content_loss = lambda_content * l2_loss(features_transformed.relu2_2, features_original.relu2_2) # Compute style loss as MSE between gram matrices style_loss = 0 for ft_y, gm_s in zip(features_transformed, gram_style): gm_y = gram_matrix(ft_y) style_loss += l2_loss(gm_y, gm_s[: images.size(0), :, :]) style_loss *= lambda_style total_loss = content_loss + style_loss total_loss.backward() optimizer.step() epoch_metrics["content"] += [content_loss.item()] epoch_metrics["style"] += [style_loss.item()] epoch_metrics["total"] += [total_loss.item()] sys.stdout.write( "\r[Epoch %d/%d] [Batch %d/%d] [Content: %.2f (%.2f) Style: %.2f (%.2f) Total: %.2f (%.2f)]" % ( epoch + 1, epochs, batch_i, len(train_dataset)/batch_size, content_loss.item(), np.mean(epoch_metrics["content"]), style_loss.item(), np.mean(epoch_metrics["style"]), total_loss.item(), np.mean(epoch_metrics["total"]), ) ) batches_done = epoch * len(dataloader) + batch_i + 1 if batches_done % sample_interval == 0: save_sample(batches_done) if checkpoint_interval > 0 and batches_done % checkpoint_interval == 0: style_name = os.path.basename(style_image).split(".")[0] torch.save(transformer.state_dict(), "checkpoints/"+style_name+"_"+str(batches_done)+".pth")
43.909091
128
0.657646
2b7089a1323a1471ceac62c665589df3b5adab95
32,241
py
Python
ssg/build_yaml.py
manywho/content
8f4641af53d7cc7d0e42699ed95553943e5e6af1
[ "BSD-3-Clause" ]
null
null
null
ssg/build_yaml.py
manywho/content
8f4641af53d7cc7d0e42699ed95553943e5e6af1
[ "BSD-3-Clause" ]
null
null
null
ssg/build_yaml.py
manywho/content
8f4641af53d7cc7d0e42699ed95553943e5e6af1
[ "BSD-3-Clause" ]
null
null
null
from __future__ import absolute_import from __future__ import print_function import os import os.path import datetime import sys import yaml from .constants import XCCDF_PLATFORM_TO_CPE from .constants import PRODUCT_TO_CPE_MAPPING from .rules import get_rule_dir_id, get_rule_dir_yaml, is_rule_dir from .checks import is_cce_format_valid, is_cce_value_valid from .yaml import open_and_expand, open_and_macro_expand from .utils import required_key, mkdir_p from .xml import ElementTree as ET from .shims import unicode_func def add_sub_element(parent, tag, data): """ Creates a new child element under parent with tag tag, and sets data as the content under the tag. In particular, data is a string to be parsed as an XML tree, allowing sub-elements of children to be added. If data should not be parsed as an XML tree, either escape the contents before passing into this function, or use ElementTree.SubElement(). Returns the newly created subelement of type tag. """ # This is used because our YAML data contain XML and XHTML elements # ET.SubElement() escapes the < > characters by &lt; and &gt; # and therefore it does not add child elements # we need to do a hack instead # TODO: Remove this function after we move to Markdown everywhere in SSG ustr = unicode_func("<{0}>{1}</{0}>").format(tag, data) try: element = ET.fromstring(ustr.encode("utf-8")) except Exception: msg = ("Error adding subelement to an element '{0}' from string: '{1}'" .format(parent.tag, ustr)) raise RuntimeError(msg) parent.append(element) return element def add_warning_elements(element, warnings): # The use of [{dict}, {dict}] in warnings is to handle the following # scenario where multiple warnings have the same category which is # valid in SCAP and our content: # # warnings: # - general: Some general warning # - general: Some other general warning # - general: |- # Some really long multiline general warning # # Each of the {dict} should have only one key/value pair. 
for warning_dict in warnings: warning = add_sub_element(element, "warning", list(warning_dict.values())[0]) warning.set("category", list(warning_dict.keys())[0]) class Profile(object): """Represents XCCDF profile """ def __init__(self, id_): self.id_ = id_ self.title = "" self.description = "" self.extends = None self.selections = [] @staticmethod def from_yaml(yaml_file, env_yaml=None): yaml_contents = open_and_expand(yaml_file, env_yaml) if yaml_contents is None: return None basename, _ = os.path.splitext(os.path.basename(yaml_file)) profile = Profile(basename) profile.title = required_key(yaml_contents, "title") del yaml_contents["title"] profile.description = required_key(yaml_contents, "description") del yaml_contents["description"] profile.extends = yaml_contents.pop("extends", None) profile.selections = required_key(yaml_contents, "selections") del yaml_contents["selections"] if yaml_contents: raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s" % (yaml_file, yaml_contents)) return profile def to_xml_element(self): element = ET.Element('Profile') element.set("id", self.id_) if self.extends: element.set("extends", self.extends) title = add_sub_element(element, "title", self.title) title.set("override", "true") desc = add_sub_element(element, "description", self.description) desc.set("override", "true") for selection in self.selections: if selection.startswith("!"): unselect = ET.Element("select") unselect.set("idref", selection[1:]) unselect.set("selected", "false") element.append(unselect) elif "=" in selection: refine_value = ET.Element("refine-value") value_id, selector = selection.split("=", 1) refine_value.set("idref", value_id) refine_value.set("selector", selector) element.append(refine_value) else: select = ET.Element("select") select.set("idref", selection) select.set("selected", "true") element.append(select) return element def get_rule_selectors(self): return list(filter(lambda x: "=" not in x, self.selections)) def get_variable_selectors(self): variables = dict() for var_selection in filter(lambda x: "=" in x, self.selections): k, v = var_selection.split("=") variables[k] = v return variables class Value(object): """Represents XCCDF Value """ def __init__(self, id_): self.id_ = id_ self.title = "" self.description = "" self.type_ = "string" self.operator = "equals" self.interactive = False self.options = {} self.warnings = [] @staticmethod def from_yaml(yaml_file, env_yaml=None): yaml_contents = open_and_expand(yaml_file, env_yaml) if yaml_contents is None: return None value_id, _ = os.path.splitext(os.path.basename(yaml_file)) value = Value(value_id) value.title = required_key(yaml_contents, "title") del yaml_contents["title"] value.description = required_key(yaml_contents, "description") del yaml_contents["description"] value.type_ = required_key(yaml_contents, "type") del yaml_contents["type"] value.operator = yaml_contents.pop("operator", "equals") possible_operators = ["equals", "not equal", "greater than", "less than", "greater than or equal", "less than or equal", "pattern match"] if value.operator not in possible_operators: raise ValueError( "Found an invalid operator value '%s' in '%s'. 
" "Expected one of: %s" % (value.operator, yaml_file, ", ".join(possible_operators)) ) value.interactive = \ yaml_contents.pop("interactive", "false").lower() == "true" value.options = required_key(yaml_contents, "options") del yaml_contents["options"] value.warnings = yaml_contents.pop("warnings", []) for warning_list in value.warnings: if len(warning_list) != 1: raise ValueError("Only one key/value pair should exist for each dictionary") if yaml_contents: raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s" % (yaml_file, yaml_contents)) return value def to_xml_element(self): value = ET.Element('Value') value.set('id', self.id_) value.set('type', self.type_) if self.operator != "equals": # equals is the default value.set('operator', self.operator) if self.interactive: # False is the default value.set('interactive', 'true') title = ET.SubElement(value, 'title') title.text = self.title add_sub_element(value, 'description', self.description) add_warning_elements(value, self.warnings) for selector, option in self.options.items(): # do not confuse Value with big V with value with small v # value is child element of Value value_small = ET.SubElement(value, 'value') # by XCCDF spec, default value is value without selector if selector != "default": value_small.set('selector', str(selector)) value_small.text = str(option) return value def to_file(self, file_name): root = self.to_xml_element() tree = ET.ElementTree(root) tree.write(file_name) class Benchmark(object): """Represents XCCDF Benchmark """ def __init__(self, id_): self.id_ = id_ self.title = "" self.status = "" self.description = "" self.notice_id = "" self.notice_description = "" self.front_matter = "" self.rear_matter = "" self.cpes = [] self.version = "0.1" self.profiles = [] self.values = {} self.bash_remediation_fns_group = None self.groups = {} self.rules = {} # This is required for OCIL clauses conditional_clause = Value("conditional_clause") conditional_clause.title = "A conditional clause for check statements." 
conditional_clause.description = conditional_clause.title conditional_clause.type_ = "string" conditional_clause.options = {"": "This is a placeholder"} self.add_value(conditional_clause) @staticmethod def from_yaml(yaml_file, id_, product_yaml=None): yaml_contents = open_and_macro_expand(yaml_file, product_yaml) if yaml_contents is None: return None benchmark = Benchmark(id_) benchmark.title = required_key(yaml_contents, "title") del yaml_contents["title"] benchmark.status = required_key(yaml_contents, "status") del yaml_contents["status"] benchmark.description = required_key(yaml_contents, "description") del yaml_contents["description"] notice_contents = required_key(yaml_contents, "notice") benchmark.notice_id = required_key(notice_contents, "id") del notice_contents["id"] benchmark.notice_description = required_key(notice_contents, "description") del notice_contents["description"] if not notice_contents: del yaml_contents["notice"] benchmark.front_matter = required_key(yaml_contents, "front-matter") del yaml_contents["front-matter"] benchmark.rear_matter = required_key(yaml_contents, "rear-matter") del yaml_contents["rear-matter"] benchmark.version = str(required_key(yaml_contents, "version")) del yaml_contents["version"] if yaml_contents: raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s" % (yaml_file, yaml_contents)) if product_yaml: benchmark.cpes = PRODUCT_TO_CPE_MAPPING[product_yaml["product"]] return benchmark def add_profiles_from_dir(self, action, dir_, env_yaml): for dir_item in os.listdir(dir_): dir_item_path = os.path.join(dir_, dir_item) if not os.path.isfile(dir_item_path): continue _, ext = os.path.splitext(os.path.basename(dir_item_path)) if ext != '.profile': sys.stderr.write( "Encountered file '%s' while looking for profiles, " "extension '%s' is unknown. 
Skipping..\n" % (dir_item, ext) ) continue self.profiles.append(Profile.from_yaml(dir_item_path, env_yaml)) if action == "list-inputs": print(dir_item_path) def add_bash_remediation_fns_from_file(self, action, file_): if action == "list-inputs": print(file_) else: tree = ET.parse(file_) self.bash_remediation_fns_group = tree.getroot() def to_xml_element(self): root = ET.Element('Benchmark') root.set('xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance') root.set('xmlns:xhtml', 'http://www.w3.org/1999/xhtml') root.set('xmlns:dc', 'http://purl.org/dc/elements/1.1/') root.set('id', 'product-name') root.set('xsi:schemaLocation', 'http://checklists.nist.gov/xccdf/1.1 xccdf-1.1.4.xsd') root.set('style', 'SCAP_1.1') root.set('resolved', 'false') root.set('xml:lang', 'en-US') status = ET.SubElement(root, 'status') status.set('date', datetime.date.today().strftime("%Y-%m-%d")) status.text = self.status add_sub_element(root, "title", self.title) add_sub_element(root, "description", self.description) notice = add_sub_element(root, "notice", self.notice_description) notice.set('id', self.notice_id) add_sub_element(root, "front-matter", self.front_matter) add_sub_element(root, "rear-matter", self.rear_matter) for idref in self.cpes: plat = ET.SubElement(root, "platform") plat.set("idref", idref) version = ET.SubElement(root, 'version') version.text = self.version ET.SubElement(root, "metadata") for profile in self.profiles: if profile is not None: root.append(profile.to_xml_element()) for value in self.values.values(): root.append(value.to_xml_element()) if self.bash_remediation_fns_group is not None: root.append(self.bash_remediation_fns_group) for group in self.groups.values(): root.append(group.to_xml_element()) for rule in self.rules.values(): root.append(rule.to_xml_element()) return root def to_file(self, file_name): root = self.to_xml_element() tree = ET.ElementTree(root) tree.write(file_name) def add_value(self, value): if value is None: return self.values[value.id_] = value def add_group(self, group): if group is None: return self.groups[group.id_] = group def add_rule(self, rule): if rule is None: return self.rules[rule.id_] = rule def to_xccdf(self): """We can easily extend this script to generate a valid XCCDF instead of SSG SHORTHAND. 
""" raise NotImplementedError def __str__(self): return self.id_ class Group(object): """Represents XCCDF Group """ ATTRIBUTES_TO_PASS_ON = ( "platform", ) def __init__(self, id_): self.id_ = id_ self.prodtype = "all" self.title = "" self.description = "" self.warnings = [] self.values = {} self.groups = {} self.rules = {} self.platform = None @staticmethod def from_yaml(yaml_file, env_yaml=None): yaml_contents = open_and_macro_expand(yaml_file, env_yaml) if yaml_contents is None: return None group_id = os.path.basename(os.path.dirname(yaml_file)) group = Group(group_id) group.prodtype = yaml_contents.pop("prodtype", "all") group.title = required_key(yaml_contents, "title") del yaml_contents["title"] group.description = required_key(yaml_contents, "description") del yaml_contents["description"] group.warnings = yaml_contents.pop("warnings", []) group.platform = yaml_contents.pop("platform", None) for warning_list in group.warnings: if len(warning_list) != 1: raise ValueError("Only one key/value pair should exist for each dictionary") if yaml_contents: raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s" % (yaml_file, yaml_contents)) return group def to_xml_element(self): group = ET.Element('Group') group.set('id', self.id_) if self.prodtype != "all": group.set("prodtype", self.prodtype) title = ET.SubElement(group, 'title') title.text = self.title add_sub_element(group, 'description', self.description) add_warning_elements(group, self.warnings) if self.platform: platform_el = ET.SubElement(group, "platform") try: platform_cpe = XCCDF_PLATFORM_TO_CPE[self.platform] except KeyError: raise ValueError("Unsupported platform '%s' in rule '%s'." % (self.platform, self.id_)) platform_el.set("idref", platform_cpe) for _value in self.values.values(): group.append(_value.to_xml_element()) for _group in self.groups.values(): group.append(_group.to_xml_element()) for _rule in self.rules.values(): group.append(_rule.to_xml_element()) return group def to_file(self, file_name): root = self.to_xml_element() tree = ET.ElementTree(root) tree.write(file_name) def add_value(self, value): if value is None: return self.values[value.id_] = value def add_group(self, group): if group is None: return if self.platform and not group.platform: group.platform = self.platform self.groups[group.id_] = group self._pass_our_properties_on_to(group) def _pass_our_properties_on_to(self, obj): for attr in self.ATTRIBUTES_TO_PASS_ON: if hasattr(obj, attr) and getattr(obj, attr) is None: setattr(obj, attr, getattr(self, attr)) def add_rule(self, rule): if rule is None: return if self.platform and not rule.platform: rule.platform = self.platform self.rules[rule.id_] = rule self._pass_our_properties_on_to(rule) def __str__(self): return self.id_ class Rule(object): """Represents XCCDF Rule """ YAML_KEYS_DEFAULTS = { "prodtype": lambda: "all", "title": lambda: RuntimeError("Missing key 'title'"), "description": lambda: RuntimeError("Missing key 'description'"), "rationale": lambda: RuntimeError("Missing key 'rationale'"), "severity": lambda: RuntimeError("Missing key 'severity'"), "references": lambda: dict(), "identifiers": lambda: dict(), "ocil_clause": lambda: None, "ocil": lambda: None, "oval_external_content": lambda: None, "warnings": lambda: list(), "platform": lambda: None, } def __init__(self, id_): self.id_ = id_ self.prodtype = "all" self.title = "" self.description = "" self.rationale = "" self.severity = "unknown" self.references = {} self.identifiers = {} self.ocil_clause = None self.ocil = None 
self.oval_external_content = None self.warnings = [] self.platform = None @staticmethod def from_yaml(yaml_file, env_yaml=None): yaml_file = os.path.normpath(yaml_file) yaml_contents = open_and_macro_expand(yaml_file, env_yaml) if yaml_contents is None: return None rule_id, ext = os.path.splitext(os.path.basename(yaml_file)) if rule_id == "rule" and ext == ".yml": rule_id = get_rule_dir_id(yaml_file) rule = Rule(rule_id) try: rule._set_attributes_from_dict(yaml_contents) except RuntimeError as exc: msg = ("Error processing '{fname}': {err}" .format(fname=yaml_file, err=str(exc))) raise RuntimeError(msg) for warning_list in rule.warnings: if len(warning_list) != 1: raise ValueError("Only one key/value pair should exist for each dictionary") if yaml_contents: raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s" % (yaml_file, yaml_contents)) rule.validate_identifiers(yaml_file) rule.validate_references(yaml_file) return rule def _set_attributes_from_dict(self, yaml_contents): for key, default_getter in self.YAML_KEYS_DEFAULTS.items(): if key not in yaml_contents: value = default_getter() if isinstance(value, Exception): raise value else: value = yaml_contents.pop(key) setattr(self, key, value) def to_contents_dict(self): """ Returns a dictionary that is the same schema as the dict obtained when loading rule YAML. """ yaml_contents = dict() for key in Rule.YAML_KEYS_DEFAULTS: yaml_contents[key] = getattr(self, key) return yaml_contents def validate_identifiers(self, yaml_file): if self.identifiers is None: raise ValueError("Empty identifier section in file %s" % yaml_file) # Validate all identifiers are non-empty: for ident_type, ident_val in self.identifiers.items(): if not isinstance(ident_type, str) or not isinstance(ident_val, str): raise ValueError("Identifiers and values must be strings: %s in file %s" % (ident_type, yaml_file)) if ident_val.strip() == "": raise ValueError("Identifiers must not be empty: %s in file %s" % (ident_type, yaml_file)) if ident_type[0:3] == 'cce': if not is_cce_format_valid("CCE-" + ident_val): raise ValueError("CCE Identifier format must be valid: invalid format '%s' for CEE '%s'" " in file '%s'" % (ident_val, ident_type, yaml_file)) if not is_cce_value_valid("CCE-" + ident_val): raise ValueError("CCE Identifier value is not a valid checksum: invalid value '%s' for CEE '%s'" " in file '%s'" % (ident_val, ident_type, yaml_file)) def validate_references(self, yaml_file): if self.references is None: raise ValueError("Empty references section in file %s" % yaml_file) for ref_type, ref_val in self.references.items(): if not isinstance(ref_type, str) or not isinstance(ref_val, str): raise ValueError("References and values must be strings: %s in file %s" % (ref_type, yaml_file)) if ref_val.strip() == "": raise ValueError("References must not be empty: %s in file %s" % (ref_type, yaml_file)) for ref_type, ref_val in self.references.items(): for ref in ref_val.split(","): if ref.strip() != ref: msg = ( "Comma-separated '{ref_type}' reference " "in {yaml_file} contains whitespace." 
.format(ref_type=ref_type, yaml_file=yaml_file)) raise ValueError(msg) def to_xml_element(self): rule = ET.Element('Rule') rule.set('id', self.id_) if self.prodtype != "all": rule.set("prodtype", self.prodtype) rule.set('severity', self.severity) add_sub_element(rule, 'title', self.title) add_sub_element(rule, 'description', self.description) add_sub_element(rule, 'rationale', self.rationale) main_ident = ET.Element('ident') for ident_type, ident_val in self.identifiers.items(): if '@' in ident_type: # the ident is applicable only on some product # format : 'policy@product', eg. 'stigid@product' # for them, we create a separate <ref> element policy, product = ident_type.split('@') ident = ET.SubElement(rule, 'ident') ident.set(policy, ident_val) ident.set('prodtype', product) else: main_ident.set(ident_type, ident_val) if main_ident.attrib: rule.append(main_ident) main_ref = ET.Element('ref') for ref_type, ref_val in self.references.items(): if '@' in ref_type: # the reference is applicable only on some product # format : 'policy@product', eg. 'stigid@product' # for them, we create a separate <ref> element policy, product = ref_type.split('@') ref = ET.SubElement(rule, 'ref') ref.set(policy, ref_val) ref.set('prodtype', product) else: main_ref.set(ref_type, ref_val) if main_ref.attrib: rule.append(main_ref) if self.oval_external_content: check = ET.SubElement(rule, 'check') check.set("system", "http://oval.mitre.org/XMLSchema/oval-definitions-5") external_content = ET.SubElement(check, "check-content-ref") external_content.set("href", self.oval_external_content) else: # TODO: This is pretty much a hack, oval ID will be the same as rule ID # and we don't want the developers to have to keep them in sync. # Therefore let's just add an OVAL ref of that ID. oval_ref = ET.SubElement(rule, "oval") oval_ref.set("id", self.id_) if self.ocil or self.ocil_clause: ocil = add_sub_element(rule, 'ocil', self.ocil if self.ocil else "") if self.ocil_clause: ocil.set("clause", self.ocil_clause) add_warning_elements(rule, self.warnings) if self.platform: platform_el = ET.SubElement(rule, "platform") try: platform_cpe = XCCDF_PLATFORM_TO_CPE[self.platform] except KeyError: raise ValueError("Unsupported platform '%s' in rule '%s'." 
% (self.platform, self.id_)) platform_el.set("idref", platform_cpe) return rule def to_file(self, file_name): root = self.to_xml_element() tree = ET.ElementTree(root) tree.write(file_name) class DirectoryLoader(object): def __init__(self, profiles_dir, bash_remediation_fns, env_yaml): self.benchmark_file = None self.group_file = None self.loaded_group = None self.rules = [] self.values = [] self.subdirectories = [] self.profiles_dir = profiles_dir self.bash_remediation_fns = bash_remediation_fns self.env_yaml = env_yaml self.parent_group = None def _collect_items_to_load(self, guide_directory): for dir_item in os.listdir(guide_directory): dir_item_path = os.path.join(guide_directory, dir_item) _, extension = os.path.splitext(dir_item) if extension == '.var': self.values.append(dir_item_path) elif dir_item == "benchmark.yml": if self.benchmark_file: raise ValueError("Multiple benchmarks in one directory") self.benchmark_file = dir_item_path elif dir_item == "group.yml": if self.group_file: raise ValueError("Multiple groups in one directory") self.group_file = dir_item_path elif extension == '.rule': self.rules.append(dir_item_path) elif is_rule_dir(dir_item_path): self.rules.append(get_rule_dir_yaml(dir_item_path)) elif dir_item != "tests": if os.path.isdir(dir_item_path): self.subdirectories.append(dir_item_path) else: sys.stderr.write( "Encountered file '%s' while recursing, extension '%s' " "is unknown. Skipping..\n" % (dir_item, extension) ) def load_benchmark_or_group(self, guide_directory): """ Loads a given benchmark or group from the specified benchmark_file or group_file, in the context of guide_directory, action, profiles_dir, env_yaml, and bash_remediation_fns. Returns the loaded group or benchmark. """ group = None if self.group_file and self.benchmark_file: raise ValueError("A .benchmark file and a .group file were found in " "the same directory '%s'" % (guide_directory)) # we treat benchmark as a special form of group in the following code if self.benchmark_file: group = Benchmark.from_yaml( self.benchmark_file, 'product-name', self.env_yaml ) if self.profiles_dir: group.add_profiles_from_dir(self.action, self.profiles_dir, self.env_yaml) group.add_bash_remediation_fns_from_file(self.action, self.bash_remediation_fns) if self.group_file: group = Group.from_yaml(self.group_file, self.env_yaml) return group def _load_group_process_and_recurse(self, guide_directory): self.loaded_group = self.load_benchmark_or_group(guide_directory) if self.loaded_group: if self.parent_group: self.parent_group.add_group(self.loaded_group) self._process_values() self._recurse_into_subdirs() self._process_rules() def process_directory_tree(self, start_dir): self._collect_items_to_load(start_dir) self._load_group_process_and_recurse(start_dir) def _recurse_into_subdirs(self): for subdir in self.subdirectories: loader = self._get_new_loader() loader.parent_group = self.loaded_group loader.process_directory_tree(subdir) def _get_new_loader(self): raise NotImplementedError() def _process_values(self): raise NotImplementedError() def _process_rules(self): raise NotImplementedError() class BuildLoader(DirectoryLoader): def __init__(self, profiles_dir, bash_remediation_fns, env_yaml, resolved_rules_dir=None): super(BuildLoader, self).__init__(profiles_dir, bash_remediation_fns, env_yaml) self.action = "build" self.resolved_rules_dir = resolved_rules_dir if resolved_rules_dir and not os.path.isdir(resolved_rules_dir): os.mkdir(resolved_rules_dir) def _process_values(self): for value_yaml in self.values: 
value = Value.from_yaml(value_yaml, self.env_yaml) self.loaded_group.add_value(value) def _process_rules(self): for rule_yaml in self.rules: rule = Rule.from_yaml(rule_yaml, self.env_yaml) self.loaded_group.add_rule(rule) if self.resolved_rules_dir: output_for_rule = os.path.join( self.resolved_rules_dir, "{id_}.yml".format(id_=rule.id_)) mkdir_p(self.resolved_rules_dir) with open(output_for_rule, "w") as f: yaml.dump(rule.to_contents_dict(), f) def _get_new_loader(self): return BuildLoader( self.profiles_dir, self.bash_remediation_fns, self.env_yaml, self.resolved_rules_dir) def export_group_to_file(self, filename): return self.loaded_group.to_file(filename) class ListInputsLoader(DirectoryLoader): def __init__(self, profiles_dir, bash_remediation_fns, env_yaml): super(ListInputsLoader, self).__init__(profiles_dir, bash_remediation_fns, env_yaml) self.action = "list-inputs" def _process_values(self): for value_yaml in self.values: print(value_yaml) def _process_rules(self): for rule_yaml in self.rules: print(rule_yaml) def _get_new_loader(self): return ListInputsLoader( self.profiles_dir, self.bash_remediation_fns, self.env_yaml) def load_benchmark_or_group(self, guide_directory): result = super(ListInputsLoader, self).load_benchmark_or_group(guide_directory) if self.benchmark_file: print(self.benchmark_file) if self.group_file: print(self.group_file) return result
36.762828
116
0.599051
a5518f4d60bd1b10da48dbb0d6adf170d7eba04a
1,511
py
Python
custom/icds_reports/mobile_views.py
rochakchauhan/commcare-hq
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
[ "BSD-3-Clause" ]
null
null
null
custom/icds_reports/mobile_views.py
rochakchauhan/commcare-hq
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
[ "BSD-3-Clause" ]
null
null
null
custom/icds_reports/mobile_views.py
rochakchauhan/commcare-hq
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
[ "BSD-3-Clause" ]
null
null
null
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.generic import TemplateView

from corehq.apps.hqwebapp import views as hqwebapp_views
from corehq.apps.locations.permissions import location_safe
from custom.icds_reports.dashboard_utils import get_dashboard_template_context
from custom.icds_reports.views import DASHBOARD_CHECKS


@xframe_options_exempt
def login(request, domain):
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('cas_mobile_dashboard', args=[domain]))
    return hqwebapp_views.domain_login(
        request, domain,
        custom_template_name='icds_reports/mobile_login.html',
        extra_context={
            'domain': domain,
            'next': reverse('cas_mobile_dashboard', args=[domain])
        }
    )


@location_safe
@method_decorator(DASHBOARD_CHECKS, name='dispatch')
@method_decorator(xframe_options_exempt, name='dispatch')
class MobileDashboardView(TemplateView):
    template_name = 'icds_reports/mobile/dashboard/mobile_dashboard.html'

    @property
    def domain(self):
        return self.kwargs['domain']

    def get_context_data(self, **kwargs):
        kwargs.update(self.kwargs)
        kwargs.update(get_dashboard_template_context(self.domain, self.request.couch_user))
        kwargs['is_mobile'] = True
        return super().get_context_data(**kwargs)
35.97619
91
0.765056
766d48729fd1d56652ed41e87fd978aae8cd29fe
2,124
py
Python
chips_v/output_stream.py
dawsonjon/chips_v
ed2cb9f7da76c4d4a05812575d062cdb0511656b
[ "MIT" ]
6
2020-09-16T07:38:52.000Z
2021-05-13T20:26:25.000Z
chips_v/output_stream.py
dawsonjon/chips_v
ed2cb9f7da76c4d4a05812575d062cdb0511656b
[ "MIT" ]
null
null
null
chips_v/output_stream.py
dawsonjon/chips_v
ed2cb9f7da76c4d4a05812575d062cdb0511656b
[ "MIT" ]
2
2020-10-12T14:52:07.000Z
2021-03-29T16:49:46.000Z
""" A bus slave that occupies a single word address provides an axi-like streaming output e.g. for connection to UART""" from baremetal import * from chips_v.utils import shex class OutputStream: def __init__(self, name): self.data = Unsigned(32).wire() self.ready = Boolean().wire() self.valid = Boolean().wire() self.name = name def get_outputs(self): outputs = [] outp = Boolean().output(self.name + "_valid_out", self.valid) outputs.append(outp) subtype = self.data.subtype outp = subtype.output(self.name + "_out", self.data) outputs.append(outp) return outputs def get_inputs(self): inputs = [] inp = Boolean().input(self.name + "_ready_in") self.ready.drive(inp) inputs.append(inp) return inputs def initialise_sim(self): """in simulation terminate IO with a reasonable value""" self.ready.drive(Boolean().constant(1)) def simulation_step(self): """action to perform during each simulation cycle""" if self.ready.get() and self.valid.get(): print( "OutputStream %s writing: %s %s %s" % ( self.name, self.data.get(), shex(self.data.get()), chr(self.data.get() & 0xFF), ) ) def get_declarations(self): """These are declarations that appear in the machine.h header""" return "extern const unsigned int %s;\n" % self.name def get_definitions(self): """These are declarations that appear in the machine.c definition""" return "const unsigned int %s = 0x%xu;\n" % (self.name, self.address) def enumerate(self, address): """reserve address space""" self.address = address def attach(self, clk, bus): slave = bus.add_slave(self.address) self.valid = slave.valid & slave.write_read self.data = slave.m2s slave.s2m.drive(Unsigned(32).constant(0)) slave.ready.drive(self.ready)
31.235294
77
0.579567
d8e30391c59f1fc1aa64dc3ef100974915a38b24
3,314
py
Python
tfx/tools/cli/commands/run_test.py
yongsheng268/tfx
6283fffb3ac81e2f213b4895fbe19623dfa9c4f5
[ "Apache-2.0" ]
null
null
null
tfx/tools/cli/commands/run_test.py
yongsheng268/tfx
6283fffb3ac81e2f213b4895fbe19623dfa9c4f5
[ "Apache-2.0" ]
null
null
null
tfx/tools/cli/commands/run_test.py
yongsheng268/tfx
6283fffb3ac81e2f213b4895fbe19623dfa9c4f5
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.tools.cli.commands.run."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import codecs
import locale
import os
import sys

from click import testing as click_testing
import mock
import tensorflow as tf

from tfx.tools.cli.commands.run import run_group


class RunTest(tf.test.TestCase):

  def setUp(self):
    # Change the encoding for Click since Python 3 is configured to use ASCII as
    # encoding for the environment.
    super(RunTest, self).setUp()
    if codecs.lookup(locale.getpreferredencoding()).name == 'ascii':
      os.environ['LANG'] = 'en_US.utf-8'
    self.runner = click_testing.CliRunner()
    sys.modules['handler_factory'] = mock.Mock()

  def test_run_create_airflow(self):
    result = self.runner.invoke(
        run_group,
        ['create', '--pipeline_name', 'chicago', '--engine', 'airflow'])
    self.assertIn('Creating a run for pipeline', result.output)

  def test_run_create_kubeflow(self):
    result = self.runner.invoke(run_group, [
        'create', '--pipeline_name', 'chicago', '--engine', 'kubeflow',
        '--iap_client_id', 'fake_id', '--namespace', 'kubeflow',
        '--endpoint', 'endpoint_url'
    ])
    self.assertIn('Creating a run for pipeline', result.output)

  def test_run_list(self):
    result = self.runner.invoke(
        run_group,
        ['list', '--pipeline_name', 'chicago', '--engine', 'airflow'])
    self.assertIn('Listing all runs of pipeline', result.output)

  def test_run_status_airflow(self):
    result = self.runner.invoke(run_group, [
        'status', '--pipeline_name', 'chicago_taxi_pipeline', '--run_id',
        'airflow_run_id', '--engine', 'airflow'
    ])
    self.assertIn('Retrieving run status', result.output)

  def test_run_status_kubeflow(self):
    result = self.runner.invoke(run_group, [
        'status', '--pipeline_name', 'chicago_taxi_pipeline', '--run_id',
        'kubeflow_run_id', '--engine', 'kubeflow', '--iap_client_id',
        'fake_id', '--namespace', 'kubeflow', '--endpoint', 'endpoint_url'
    ])
    self.assertIn('Retrieving run status', result.output)

  def test_run_terminate(self):
    result = self.runner.invoke(
        run_group,
        ['terminate', '--run_id', 'airflow_run_id', '--engine', 'airflow'])
    self.assertIn('Terminating run.', result.output)

  def test_run_delete(self):
    result = self.runner.invoke(run_group, [
        'delete', '--run_id', 'kubeflow_run_id', '--engine', 'kubeflow',
        '--iap_client_id', 'fake_id', '--namespace', 'kubeflow',
        '--endpoint', 'endpoint_url'
    ])
    self.assertIn('Deleting run', result.output)


if __name__ == '__main__':
  tf.test.main()
34.884211
80
0.683464