text stringlengths 4 1.02M | meta dict |
|---|---|
from setuptools import setup

# Use the README as the PyPI long description so the docs stay in sync
# with the source tree.
with open('README.md', 'r') as readme_f:
    long_description = readme_f.read()

setup(
    name='py-wasapi-client',
    version='1.1.0',
    url='https://github.com/unt-libraries/py-wasapi-client',
    author='University of North Texas Libraries',
    author_email='lauren.ko@unt.edu',
    license='BSD',
    py_modules=['wasapi_client'],
    scripts=['wasapi_client.py'],
    # Fixed typo in the user-visible description: 'Transer' -> 'Transfer'.
    description='A client for the Archive-It and Webrecorder WASAPI Data Transfer API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=['requests>=2.18.1'],
    entry_points={
        'console_scripts': [
            'wasapi-client=wasapi_client:main'
        ]
    },
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    classifiers=[
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Communications :: File Sharing',
    ],
)
| {
"content_hash": "33605729ba60e81b7cebd9e4e682e7a7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 86,
"avg_line_length": 32.43589743589744,
"alnum_prop": 0.6205533596837944,
"repo_name": "unt-libraries/py-wasapi-client",
"id": "af578d901a88cdea874dbc12ad047e4345b6786c",
"size": "1288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "59353"
}
],
"symlink_target": ""
} |
from django.db import models
class Example(models.Model):
    """A content entry with review text, optional media and a
    draft/published status."""
    title = models.CharField(max_length=80)
    product_type = models.CharField(max_length=100)
    # unique=True here means no two entries may share identical review text.
    review = models.TextField(max_length=500, unique=True)
    # NOTE(review): null=True on CharField/URLField stores two "empty"
    # states ('' and NULL); Django convention prefers blank=True alone —
    # changing it would require a migration, so it is only flagged here.
    media_link = models.URLField(blank=True, null=True, max_length=100)
    audio_image = models.ImageField(blank=True, null=True, upload_to='examples/')
    media = models.FileField(blank=True, null=True, upload_to='examples/')
    # Manual sort weight; lower values come first (see Meta.ordering).
    order = models.IntegerField(default=0)
    STATUS_CHOICES = (
        ('d', 'Draft'),
        ('p', 'Published'),
    )
    # New entries start as drafts.
    status = models.CharField(max_length=1, choices=STATUS_CHOICES,
                              default='d')

    class Meta:
        ordering = ["order"]

    def __str__(self):
        return self.title
class Musician(models.Model):
    """A musician profile with bio, image, links and up to two songs."""
    name = models.CharField(max_length=80, unique=True)
    position = models.CharField(max_length=100)
    musical_styles = models.CharField(max_length=100)
    image = models.ImageField(blank=True, null=True, upload_to='musicians/')
    # unique=True here means no two musicians may share identical bio text.
    bio = models.TextField(max_length=500, unique=True)
    profile_url = models.URLField(blank=True, null=True, max_length=100)
    # Two optional song slots, each a URL plus a display name.
    # NOTE(review): null=True on CharField stores both '' and NULL as
    # "empty"; Django convention prefers blank=True alone — left as-is
    # since changing it requires a migration.
    song_url_1 = models.URLField(blank=True, null=True, max_length=100)
    song_name_1 = models.CharField(blank=True, null=True, max_length=80)
    song_url_2 = models.URLField(blank=True, null=True, max_length=100)
    song_name_2 = models.CharField(blank=True, null=True, max_length=80)
    # Manual sort weight; lower values come first (see Meta.ordering).
    order = models.IntegerField(default=0)
    STATUS_CHOICES = (
        ('d', 'Draft'),
        ('p', 'Published'),
    )
    # New profiles start as drafts.
    status = models.CharField(max_length=1, choices=STATUS_CHOICES,
                              default='d')

    class Meta:
        ordering = ["order"]

    def __str__(self):
        return self.name
| {
"content_hash": "18c834ef8fb7cfba76c29d72c171e23b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 36.55102040816327,
"alnum_prop": 0.6404243439419319,
"repo_name": "SarahJaine/say-it",
"id": "58eca26915876043532b99f968301341440fdb38",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sayit/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24854"
},
{
"name": "HTML",
"bytes": "16294"
},
{
"name": "JavaScript",
"bytes": "7000"
},
{
"name": "Python",
"bytes": "23259"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
import logging
from pylons import tmpl_context as c
from datetime import datetime
from allura.app import Application, SitemapEntry
from allura.lib import helpers as h
from allura import model as M
from allura.eventslistener import EventsListener
from model.stats import UserStats
from controllers.userstats import ForgeUserStatsController
from forgeuserstats import version
from ming.orm import session
log = logging.getLogger(__name__)
class UserStatsListener(EventsListener):
    """Records Allura events on the acting user's UserStats document.

    Every handler needs the same "fetch stats, create on first use"
    step, so it lives in one private helper instead of being repeated
    in each method.
    """

    @staticmethod
    def _get_stats(user):
        """Return ``user.stats``, creating the record if absent."""
        stats = user.stats
        if not stats:
            stats = UserStats.create(user)
        return stats

    def newArtifact(self, art_type, art_datetime, project, user):
        """Record the creation of an artifact of ``art_type``."""
        self._get_stats(user).addNewArtifact(art_type, art_datetime, project)

    def modifiedArtifact(self, art_type, art_datetime, project, user):
        """Record a modification to an artifact of ``art_type``."""
        self._get_stats(user).addModifiedArtifact(
            art_type, art_datetime, project)

    def newUser(self, user):
        """Create a stats record for a newly registered user."""
        UserStats.create(user)

    def ticketEvent(self, event_type, ticket, project, user):
        """Record a ticket being assigned, revoked or closed.

        ``user`` may be None (nothing is recorded in that case).
        """
        if user is None:
            return
        stats = self._get_stats(user)
        if event_type == "assigned":
            stats.addAssignedTicket(ticket.mod_date, project)
        elif event_type == "revoked":
            stats.addRevokedTicket(ticket.mod_date, project)
        elif event_type == "closed":
            stats.addClosedTicket(
                ticket.created_date, ticket.mod_date, project)

    def newCommit(self, newcommit, project, user):
        """Record a new commit at the current UTC time."""
        self._get_stats(user).addCommit(newcommit, datetime.utcnow(), project)

    def addUserLogin(self, user):
        """Record a login at the current UTC time."""
        self._get_stats(user).addLogin(datetime.utcnow())

    def newOrganization(self, organization):
        """Organization events are not tracked by this tool."""
        pass
class ForgeUserStatsApp(Application):
    """Allura tool serving per-user statistics pages.

    Requests are handled by ForgeUserStatsController; the sidebar exposes
    overview, commits, artifacts and tickets views.
    """
    __version__ = version.__version__
    tool_label = 'UserStats'
    default_mount_label = 'Stats'
    default_mount_point = 'stats'
    permissions = ['configure', 'read', 'write',
                   'unmoderated_post', 'post', 'moderate', 'admin']
    permissions_desc = {
        'read': 'View user stats.',
        'admin': 'Toggle stats visibility.',
    }
    # NOTE(review): max_instances = 0 presumably blocks manual install via
    # the normal "add tool" flow — confirm against Application docs.
    max_instances = 0
    ordinal = 15
    config_options = Application.config_options
    default_external_feeds = []
    icons = {
        24: 'userstats/images/stats_24.png',
        32: 'userstats/images/stats_32.png',
        48: 'userstats/images/stats_48.png'
    }
    root = ForgeUserStatsController()

    def __init__(self, project, config):
        """Set up the default ACL: anonymous may read, admins administer."""
        Application.__init__(self, project, config)
        role_admin = M.ProjectRole.by_name('Admin', project)._id
        role_anon = M.ProjectRole.by_name('*anonymous', project)._id
        self.config.acl = [
            M.ACE.allow(role_anon, 'read'),
            M.ACE.allow(role_admin, 'admin')]

    def main_menu(self):
        """Top-level menu entry pointing at the tool root."""
        return [SitemapEntry(self.config.options.mount_label, '.')]

    def is_visible_to(self, user):
        """Only show the tool on root projects."""
        # we don't work with user subprojects
        return c.project.is_root

    @property
    @h.exceptionless([], log)
    def sitemap(self):
        """Sitemap entry with the sidebar menu nested under it."""
        menu_id = self.config.options.mount_label
        with h.push_config(c, app=self):
            return [
                SitemapEntry(menu_id, '.')[self.sidebar_menu()]]

    @property
    def show_discussion(self):
        # Defaults to True when the option was never configured.
        if 'show_discussion' in self.config.options:
            return self.config.options['show_discussion']
        else:
            return True

    @h.exceptionless([], log)
    def sidebar_menu(self):
        """Sidebar links for the stats sub-pages."""
        base = c.app.url
        links = [SitemapEntry('Overview', base),
                 SitemapEntry('Commits', base + 'commits'),
                 SitemapEntry('Artifacts', base + 'artifacts'),
                 SitemapEntry('Tickets', base + 'tickets')]
        return links

    def admin_menu(self):
        """Admin-section link to the tool's settings page."""
        links = [SitemapEntry(
            'Settings', c.project.url() + 'userstats/settings')]
        return links

    def install(self, project):
        # It doesn't make any sense to install the tool twice on the same
        # project therefore, if it already exists, it doesn't install it
        # a second time.
        for tool in project.app_configs:
            if tool.tool_name == 'userstats':
                if self.config.options.mount_point != tool.options.mount_point:
                    project.uninstall_app(self.config.options.mount_point)
                return

    def uninstall(self, project):
        """Remove the tool's configuration from the project."""
        self.config.delete()
        session(self.config).flush()
| {
"content_hash": "3e9b2cff12dc9f428c1ee043b59d8498",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 31.993243243243242,
"alnum_prop": 0.6149947201689546,
"repo_name": "apache/incubator-allura",
"id": "eb25140ac6d7099b987f80a975ad174ccfdf0ad7",
"size": "5621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ForgeUserStats/forgeuserstats/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155606"
},
{
"name": "JavaScript",
"bytes": "697175"
},
{
"name": "Puppet",
"bytes": "6882"
},
{
"name": "Python",
"bytes": "3667166"
},
{
"name": "Ruby",
"bytes": "5739"
},
{
"name": "Shell",
"bytes": "31675"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
import yaml
class SettingManager:
    """Loads application settings from a YAML file into ``properties``.

    :param path: path to the YAML settings file
        (defaults to 'application.yml')
    """

    def __init__(self, path='application.yml'):
        # Open read-only: the file is only parsed, never written back.
        # The previous 'r+' mode needlessly required write access and
        # failed on read-only files or filesystems.
        with open(path, 'r') as f:
            self.properties = yaml.safe_load(f)
| {
"content_hash": "9f7d073fa63aa012fadd394c089a3b60",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 47,
"avg_line_length": 21,
"alnum_prop": 0.6011904761904762,
"repo_name": "yamanakahirofumi/mokobot",
"id": "fdde84233f54ac9a24a1349d9814689c48c00e27",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18910"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version():
    """Read the package version string from the bundled version.txt."""
    with open('ircclient/version.txt') as version_file:
        contents = version_file.read()
    return contents.strip()
def get_readme():
    """Return the stripped contents of README.rst, or '' if unreadable."""
    try:
        with open('README.rst') as readme_file:
            contents = readme_file.read()
        return contents.strip()
    except IOError:
        return ''
# Package metadata. Version and long description are read from files
# (via get_version/get_readme) so they stay in sync with the tree.
setup(
    name='ircclient',
    version=get_version(),
    description='Simple client interface.',
    long_description=get_readme(),
    author='Jeong YunWon',
    author_email='jeong+ircclient@youknowone.org',
    url='https://github.com/youknowone/ircclient',
    packages=(
        'ircclient',
    ),
    # Ship version.txt inside the package so get_version works at runtime.
    package_data={
        'ircclient': ['version.txt']
    },
    install_requires=[
        'setuptools',
    ],
)
| {
"content_hash": "357ee17229d5255f9a76a3a4afd396eb",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 50,
"avg_line_length": 20.487179487179485,
"alnum_prop": 0.6070087609511889,
"repo_name": "youknowone/ircclient",
"id": "fe489b4c2d984b35aad3c74b3663a80634a9d6c1",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "24199"
}
],
"symlink_target": ""
} |
import os
# Settings used by the django_csv_tests test suite (in-memory database).
DEBUG = True  # acceptable here; never enable DEBUG in production settings
SITE_ID = 1

# Repository root: one level above this settings module's directory.
APP_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..'))

# In-memory SQLite keeps the test database fast and disposable.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}

ROOT_URLCONF = 'django_csv_tests.tests.urls'
STATIC_URL = '/static/'

TEMPLATE_DIRS = (
    os.path.join(APP_ROOT, 'tests/templates'),
)

# Third-party / contrib applications.
EXTERNAL_APPS = [
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'django.contrib.sites',
]

# Applications under test.
INTERNAL_APPS = [
    'django_csv_tests',
    'django_csv_tests.tests.test_app',
]

INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS

SECRET_KEY = 'foobar'  # dummy key; fine for tests, never for production

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
)
| {
"content_hash": "8d6f73fdf2d5ccae6e92d7dd2f0fe4ac",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 62,
"avg_line_length": 23.375,
"alnum_prop": 0.6864973262032086,
"repo_name": "enricobarzetti/django_csv_tests",
"id": "5a77549b74e8632be3ff1003097e83489154b0b9",
"size": "1496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_csv_tests/tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8993"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.views.generic import RedirectView
from rest_framework import routers
from periods import views as period_views
# REST API routes registered through DRF's DefaultRouter.
# NOTE(review): ``base_name`` is the older spelling of DRF's ``basename``
# argument — confirm against the pinned djangorestframework version.
router = routers.DefaultRouter()
router.register(r'periods', period_views.FlowEventViewSet, base_name='periods')
router.register(r'statistics', period_views.StatisticsViewSet, base_name='statistics')

urlpatterns = [
    # Site root redirects (temporarily) to the calendar page.
    url(r'^$', RedirectView.as_view(url='calendar/', permanent=False)),
    # Authentication pages come from django-allauth.
    url(r'^accounts/', include('allauth.urls')),
    url(r'^accounts/profile/$', period_views.ProfileUpdateView.as_view(), name='user_profile'),
    url(r'^accounts/profile/api_info/$', period_views.ApiInfoView.as_view(), name='api_info'),
    url(r'^accounts/profile/regenerate_key/$', period_views.RegenerateKeyView.as_view(),
        name='regenerate_key'),
    # Versioned API: router-backed viewsets plus a few standalone views.
    url(r'^api/v2/', include(router.urls)),
    url(r'^api/v2/authenticate/$', period_views.ApiAuthenticateView.as_view(), name='authenticate'),
    url(r'^api/v2/aeris/$', period_views.AerisView.as_view(), name='aeris'),
    # Flow event CRUD pages (single event, then a formset of many).
    url(r'^flow_event/$', period_views.FlowEventCreateView.as_view(), name='flow_event_create'),
    url(r'^flow_event/(?P<pk>[0-9]+)/$', period_views.FlowEventUpdateView.as_view(),
        name='flow_event_update'),
    url(r'^flow_events/$', period_views.FlowEventFormSetView.as_view(), name='flow_events'),
    url(r'^calendar/$', period_views.CalendarView.as_view(), name='calendar'),
    # Statistics pages and their chart-data endpoints.
    url(r'^statistics/$', period_views.StatisticsView.as_view(), name='statistics'),
    url(r'^statistics/cycle_length_frequency/$', period_views.CycleLengthFrequencyView.as_view()),
    url(r'^statistics/cycle_length_history/$', period_views.CycleLengthHistoryView.as_view()),
    url(r'^statistics/qigong_cycles/$', period_views.QigongCycleView.as_view()),
]
| {
"content_hash": "e6350e1596a397969d2e0d58d7e1accb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 100,
"avg_line_length": 54.333333333333336,
"alnum_prop": 0.7105409927495817,
"repo_name": "jessamynsmith/eggtimer-server",
"id": "10d58fac540d40061496237ac886bfdbc6813686",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "periods/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1917"
},
{
"name": "HTML",
"bytes": "27499"
},
{
"name": "JavaScript",
"bytes": "18309"
},
{
"name": "Python",
"bytes": "116098"
},
{
"name": "Shell",
"bytes": "587"
}
],
"symlink_target": ""
} |
"""Compatibility layer for different database engines
This modules stores logic specific to different database engines. Things
like time-related functions that are similar but not identical, or
information as to expose certain features or not and how to expose them.
For instance, Hive/Presto supports partitions and have a specific API to
list partitions. Other databases like Vertica also support partitions but
have different API to get to them. Other databases don't support partitions
at all. The classes here will use a common interface to specify all this.
The general idea is to use static classes and an inheritance scheme.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple, defaultdict
import inspect
import logging
import re
import textwrap
import time
import sqlparse
from sqlalchemy import select
from sqlalchemy.sql import text
from flask_babel import lazy_gettext as _
from superset.utils import SupersetTemplateException
from superset.utils import QueryStatus
from superset import conf, cache_util, utils
# Deployment-provided hook; presumably transforms tracking URLs before
# display — confirm against the TRACKING_URL_TRANSFORMER config docs.
tracking_url_trans = conf.get('TRACKING_URL_TRANSFORMER')
# A time grain: ``name`` (identifier), ``label`` (display text) and
# ``function`` (a SQL template containing a ``{col}`` placeholder).
Grain = namedtuple('Grain', 'name label function')
class LimitMethod(object):
    """Enumerates the strategies available for applying a row limit."""
    # Execute the query as-is and fetch at most `limit` rows client-side.
    FETCH_MANY = 'fetch_many'
    # Presumably wraps the statement in a limiting outer query — see the
    # engine specs that select this method.
    WRAP_SQL = 'wrap_sql'
class BaseEngineSpec(object):
    """Abstract class for database engine specific configurations"""
    engine = 'base'  # str as defined in sqlalchemy.engine.engine
    cursor_execute_kwargs = {}  # extra kwargs passed to cursor.execute()
    time_grains = tuple()  # Grain tuples supported by this engine
    time_groupby_inline = False
    limit_method = LimitMethod.FETCH_MANY
    time_secondary_columns = False

    @classmethod
    def fetch_data(cls, cursor, limit):
        """Fetch up to ``limit`` rows from an already-executed cursor."""
        if cls.limit_method == LimitMethod.FETCH_MANY:
            return cursor.fetchmany(limit)
        return cursor.fetchall()

    @classmethod
    def epoch_to_dttm(cls):
        """SQL template converting an epoch-seconds ``{col}`` to a
        datetime; engine-specific, so subclasses must override."""
        raise NotImplementedError()

    @classmethod
    def epoch_ms_to_dttm(cls):
        """Like :meth:`epoch_to_dttm` but for epoch milliseconds."""
        return cls.epoch_to_dttm().replace('{col}', '({col}/1000.0)')

    @classmethod
    def extra_table_metadata(cls, database, table_name, schema_name):
        """Returns engine-specific table metadata"""
        return {}

    @classmethod
    def escape_sql(cls, sql):
        """Escapes the raw SQL"""
        return sql

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """SQL literal for ``dttm``; the base class ignores
        ``target_type`` and emits a quoted 'YYYY-MM-DD HH:MM:SS'."""
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        """Returns the dictionary {schema : [result_set_name]}.

        Datasource_type can be 'table' or 'view'.
        Empty schema corresponds to the list of full names of the all
        tables or views: <schema>.<result_set_name>.
        """
        schemas = db.inspector.get_schema_names()
        result_sets = {}
        all_result_sets = []
        for schema in schemas:
            if datasource_type == 'table':
                result_sets[schema] = sorted(
                    db.inspector.get_table_names(schema))
            elif datasource_type == 'view':
                result_sets[schema] = sorted(
                    db.inspector.get_view_names(schema))
            # Also collect fully-qualified names under the '' key.
            all_result_sets += [
                '{}.{}'.format(schema, t) for t in result_sets[schema]]
        if all_result_sets:
            result_sets[""] = all_result_sets
        return result_sets

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Handle a live cursor between the execute and fetchall calls

        The flow works without this method doing anything, but it allows
        for handling the cursor and updating progress information in the
        query object"""
        pass

    @classmethod
    def extract_error_message(cls, e):
        """Extract error message for queries"""
        return utils.error_msg_from_exception(e)

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema):
        """Based on a URI and selected schema, return a new URI

        The URI here represents the URI as entered when saving the database,
        ``selected_schema`` is the schema currently active presumably in
        the SQL Lab dropdown. Based on that, for some database engine,
        we can return a new altered URI that connects straight to the
        active schema, meaning the users won't have to prefix the object
        names by the schema name.

        Some databases engines have 2 level of namespacing: database and
        schema (postgres, oracle, mssql, ...)
        For those it's probably better to not alter the database
        component of the URI with the schema name, it won't work.

        Some database drivers like presto accept "{catalog}/{schema}" in
        the database component of the URL, that can be handled here.
        """
        return uri

    @classmethod
    def patch(cls):
        """Hook for engine-specific monkey patching; no-op by default."""
        pass

    @classmethod
    def get_table_names(cls, schema, inspector):
        """Sorted table names for ``schema`` via the SQLAlchemy inspector."""
        return sorted(inspector.get_table_names(schema))

    @classmethod
    def where_latest_partition(
            cls, table_name, schema, database, qry, columns=None):
        """Return ``qry`` filtered to the latest partition, or False when
        the engine has no partition support (the default)."""
        return False

    @classmethod
    def select_star(cls, my_db, table_name, schema=None, limit=100,
                    show_cols=False, indent=True, latest_partition=True):
        """Build a SELECT statement previewing ``table_name``.

        Optionally lists explicit (quoted) columns, applies a row limit,
        restricts to the latest partition and reindents the SQL text.
        """
        fields = '*'
        cols = []
        if show_cols or latest_partition:
            cols = my_db.get_table(table_name, schema=schema).columns
        if show_cols:
            fields = [my_db.get_quoter()(c.name) for c in cols]
        full_table_name = table_name
        if schema:
            full_table_name = schema + '.' + table_name
        qry = select(fields).select_from(text(full_table_name))
        if limit:
            qry = qry.limit(limit)
        if latest_partition:
            # where_latest_partition returns False (not a falsy query)
            # when unsupported, hence the explicit != False comparison.
            partition_query = cls.where_latest_partition(
                table_name, schema, my_db, qry, columns=cols)
            if partition_query != False:  # noqa
                qry = partition_query
        sql = my_db.compile_sqla_query(qry)
        if indent:
            sql = sqlparse.format(sql, reindent=True)
        return sql
class PostgresEngineSpec(BaseEngineSpec):
    """Engine spec for PostgreSQL; time grains use DATE_TRUNC."""
    engine = 'postgresql'
    time_grains = (
        Grain("Time Column", _('Time Column'), "{col}"),
        Grain("second", _('second'), "DATE_TRUNC('second', {col})"),
        Grain("minute", _('minute'), "DATE_TRUNC('minute', {col})"),
        Grain("hour", _('hour'), "DATE_TRUNC('hour', {col})"),
        Grain("day", _('day'), "DATE_TRUNC('day', {col})"),
        Grain("week", _('week'), "DATE_TRUNC('week', {col})"),
        Grain("month", _('month'), "DATE_TRUNC('month', {col})"),
        Grain("quarter", _('quarter'), "DATE_TRUNC('quarter', {col})"),
        Grain("year", _('year'), "DATE_TRUNC('year', {col})"),
    )

    @classmethod
    def fetch_data(cls, cursor, limit):
        """Fetch rows, returning [] when the statement produced no
        result set (cursor.description is empty)."""
        if not cursor.description:
            return []
        if cls.limit_method == LimitMethod.FETCH_MANY:
            return cursor.fetchmany(limit)
        return cursor.fetchall()

    @classmethod
    def epoch_to_dttm(cls):
        """Postgres idiom: add epoch seconds to the 'epoch' timestamp."""
        return "(timestamp 'epoch' + {col} * interval '1 second')"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Quoted 'YYYY-MM-DD HH:MM:SS' literal regardless of target type."""
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class Db2EngineSpec(BaseEngineSpec):
    """Engine spec for IBM DB2 (ibm_db_sa dialect)."""
    engine = 'ibm_db_sa'
    # Each grain truncates a timestamp by subtracting its sub-unit
    # components (microseconds, seconds, minutes, ...).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('second', _('second'),
              'CAST({col} as TIMESTAMP)'
              ' - MICROSECOND({col}) MICROSECONDS'),
        Grain('minute', _('minute'),
              'CAST({col} as TIMESTAMP)'
              ' - SECOND({col}) SECONDS'
              ' - MICROSECOND({col}) MICROSECONDS'),
        Grain('hour', _('hour'),
              'CAST({col} as TIMESTAMP)'
              ' - MINUTE({col}) MINUTES'
              ' - SECOND({col}) SECONDS'
              ' - MICROSECOND({col}) MICROSECONDS '),
        Grain('day', _('day'),
              'CAST({col} as TIMESTAMP)'
              ' - HOUR({col}) HOURS'
              ' - MINUTE({col}) MINUTES'
              ' - SECOND({col}) SECONDS'
              ' - MICROSECOND({col}) MICROSECONDS '),
        Grain('week', _('week'),
              '{col} - (DAYOFWEEK({col})) DAYS'),
        Grain('month', _('month'),
              '{col} - (DAY({col})-1) DAYS'),
        Grain('quarter', _('quarter'),
              '{col} - (DAY({col})-1) DAYS'
              ' - (MONTH({col})-1) MONTHS'
              ' + ((QUARTER({col})-1) * 3) MONTHS'),
        Grain('year', _('year'),
              '{col} - (DAY({col})-1) DAYS'
              ' - (MONTH({col})-1) MONTHS'),
    )

    @classmethod
    def epoch_to_dttm(cls):
        """DB2 idiom: add epoch seconds to the 1970-01-01 timestamp."""
        return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """DB2-style timestamp literal ('YYYY-MM-DD-HH.MM.SS')."""
        return "'{}'".format(dttm.strftime('%Y-%m-%d-%H.%M.%S'))
class SqliteEngineSpec(BaseEngineSpec):
    """Engine spec for SQLite; note SQLite exposes a single schema."""
    engine = 'sqlite'
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('day', _('day'), 'DATE({col})'),
        Grain("week", _('week'),
              "DATE({col}, -strftime('%w', {col}) || ' days')"),
        Grain("month", _('month'),
              "DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')"),
    )

    @classmethod
    def epoch_to_dttm(cls):
        return "datetime({col}, 'unixepoch')"

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        """Returns {schema: [result_set_name]} plus fully-qualified
        names under the '' key; only the first (and only) schema
        reported by the inspector is used."""
        schemas = db.inspector.get_schema_names()
        result_sets = {}
        all_result_sets = []
        schema = schemas[0]
        if datasource_type == 'table':
            result_sets[schema] = sorted(db.inspector.get_table_names())
        elif datasource_type == 'view':
            result_sets[schema] = sorted(db.inspector.get_view_names())
        all_result_sets += [
            '{}.{}'.format(schema, t) for t in result_sets[schema]]
        if all_result_sets:
            result_sets[""] = all_result_sets
        return result_sets

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # Normalize to 'YYYY-MM-DD HH:MM:SS.ffffff' so the literal
        # always carries a microseconds component.
        iso = dttm.isoformat().replace('T', ' ')
        if '.' not in iso:
            iso += '.000000'
        return "'{}'".format(iso)

    @classmethod
    def get_table_names(cls, schema, inspector):
        """Need to disregard the schema for Sqlite"""
        return sorted(inspector.get_table_names())
class MySQLEngineSpec(BaseEngineSpec):
    """Engine spec for MySQL."""
    engine = 'mysql'
    cursor_execute_kwargs = {'args': {}}
    # Grains are built with DATE()/DATE_ADD()/DATE_SUB() arithmetic since
    # MySQL has no generic date_trunc function.
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain("second", _('second'), "DATE_ADD(DATE({col}), "
              "INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60"
              " + SECOND({col})) SECOND)"),
        Grain("minute", _('minute'), "DATE_ADD(DATE({col}), "
              "INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)"),
        Grain("hour", _('hour'), "DATE_ADD(DATE({col}), "
              "INTERVAL HOUR({col}) HOUR)"),
        Grain('day', _('day'), 'DATE({col})'),
        Grain("week", _('week'), "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFWEEK({col}) - 1 DAY))"),
        Grain("month", _('month'), "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFMONTH({col}) - 1 DAY))"),
        Grain("quarter", _('quarter'), "MAKEDATE(YEAR({col}), 1) "
              "+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER"),
        Grain("year", _('year'), "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFYEAR({col}) - 1 DAY))"),
        Grain("week_start_monday", _('week_start_monday'),
              "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """STR_TO_DATE literal for DATETIME/DATE targets; a plain quoted
        string otherwise."""
        if target_type.upper() in ('DATETIME', 'DATE'):
            return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema=None):
        """Swap the URI's database for ``selected_schema`` when given."""
        if selected_schema:
            uri.database = selected_schema
        return uri

    @classmethod
    def epoch_to_dttm(cls):
        return "from_unixtime({col})"
class PrestoEngineSpec(BaseEngineSpec):
    """Engine spec for Presto.

    Adds partition-aware helpers (built on SHOW PARTITIONS) and query
    progress polling through the PyHive cursor.
    """
    engine = 'presto'
    cursor_execute_kwargs = {'parameters': None}
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('second', _('second'),
              "date_trunc('second', CAST({col} AS TIMESTAMP))"),
        Grain('minute', _('minute'),
              "date_trunc('minute', CAST({col} AS TIMESTAMP))"),
        Grain('hour', _('hour'),
              "date_trunc('hour', CAST({col} AS TIMESTAMP))"),
        Grain('day', _('day'),
              "date_trunc('day', CAST({col} AS TIMESTAMP))"),
        Grain('week', _('week'),
              "date_trunc('week', CAST({col} AS TIMESTAMP))"),
        Grain('month', _('month'),
              "date_trunc('month', CAST({col} AS TIMESTAMP))"),
        Grain('quarter', _('quarter'),
              "date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
        Grain("week_ending_saturday", _('week_ending_saturday'),
              "date_add('day', 5, date_trunc('week', date_add('day', 1, "
              "CAST({col} AS TIMESTAMP))))"),
        Grain("week_start_sunday", _('week_start_sunday'),
              "date_add('day', -1, date_trunc('week', "
              "date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
    )

    @classmethod
    def patch(cls):
        """Monkey-patch the PyHive Presto cursor so queries can be
        cancelled (used by handle_cursor on stop)."""
        from pyhive import presto
        from superset.db_engines import presto as patched_presto
        presto.Cursor.cancel = patched_presto.cancel

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema=None):
        """Point the URI at ``{catalog}/{selected_schema}`` so queries
        run directly against the active schema."""
        database = uri.database
        if selected_schema:
            if '/' in database:
                database = database.split('/')[0] + '/' + selected_schema
            else:
                database += '/' + selected_schema
        uri.database = database
        return uri

    @classmethod
    def escape_sql(cls, sql):
        # Double every lone '%' (leaving already-doubled '%%' intact) so
        # the DBAPI's %-interpolation doesn't consume it.
        return re.sub(r'%%|%', "%%", sql)

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """SQL literal for ``dttm``, matched to the target column type."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
        if tt == 'TIMESTAMP':
            return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def epoch_to_dttm(cls):
        return "from_unixtime({col})"

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        """Returns the dictionary {schema : [result_set_name]}.

        Datasource_type can be 'table' or 'view'.
        Empty schema corresponds to the list of full names of the all
        tables or views: <schema>.<result_set_name>.
        """
        result_set_df = db.get_df(
            """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
               ORDER BY concat(table_schema, '.', table_name)""".format(
                datasource_type.upper()), None)
        result_sets = defaultdict(list)
        for unused, row in result_set_df.iterrows():
            result_sets[row['table_schema']].append(row['table_name'])
            result_sets[""].append('{}.{}'.format(
                row['table_schema'], row['table_name']))
        return result_sets

    @classmethod
    def extra_table_metadata(cls, database, table_name, schema_name):
        """Partition columns, latest partition value and a ready-made
        partition query for the table (empty dict when unpartitioned)."""
        indexes = database.get_indexes(table_name, schema_name)
        if not indexes:
            return {}
        cols = indexes[0].get('column_names', [])
        full_table_name = table_name
        if schema_name and '.' not in table_name:
            full_table_name = "{}.{}".format(schema_name, table_name)
        pql = cls._partition_query(full_table_name)
        col_name, latest_part = cls.latest_partition(
            table_name, schema_name, database, show_first=True)
        return {
            'partitions': {
                'cols': cols,
                'latest': {col_name: latest_part},
                'partitionQuery': pql,
            }
        }

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Updates progress information"""
        logging.info('Polling the cursor for progress')
        polled = cursor.poll()
        # poll returns dict -- JSON status information or ``None``
        # if the query is done
        # https://github.com/dropbox/PyHive/blob/
        # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
        while polled:
            # Update the object and wait for the kill signal.
            stats = polled.get('stats', {})
            # Re-read the query row so a STOPPED status set elsewhere
            # is seen here.
            query = session.query(type(query)).filter_by(id=query.id).one()
            if query.status == QueryStatus.STOPPED:
                cursor.cancel()
                break
            if stats:
                completed_splits = float(stats.get('completedSplits'))
                total_splits = float(stats.get('totalSplits'))
                if total_splits and completed_splits:
                    progress = 100 * (completed_splits / total_splits)
                    logging.info(
                        'Query progress: {} / {} '
                        'splits'.format(completed_splits, total_splits))
                    # Progress only ever moves forward.
                    if progress > query.progress:
                        query.progress = progress
                    session.commit()
            time.sleep(1)
            logging.info('Polling the cursor for progress')
            polled = cursor.poll()

    @classmethod
    def extract_error_message(cls, e):
        """Unpack Presto's structured DatabaseError payload when present;
        fall back to the generic extractor otherwise."""
        if (
                hasattr(e, 'orig') and
                type(e.orig).__name__ == 'DatabaseError' and
                isinstance(e.orig[0], dict)):
            error_dict = e.orig[0]
            return '{} at {}: {}'.format(
                error_dict['errorName'],
                error_dict['errorLocation'],
                error_dict['message']
            )
        return utils.error_msg_from_exception(e)

    @classmethod
    def _partition_query(
            cls, table_name, limit=0, order_by=None, filters=None):
        """Returns a partition query

        :param table_name: the name of the table to get partitions from
        :type table_name: str
        :param limit: the number of partitions to be returned
        :type limit: int
        :param order_by: a list of tuples of field name and a boolean
            that determines if that field should be sorted in descending
            order
        :type order_by: list of (str, bool) tuples
        :param filters: a list of filters to apply
        :param filters: dict of field name and filter value combinations
        """
        limit_clause = "LIMIT {}".format(limit) if limit else ''
        order_by_clause = ''
        if order_by:
            l = []  # noqa: E741
            for field, desc in order_by:
                # Parenthesized conditional so ascending fields keep their
                # name; the previous unparenthesized form bound the whole
                # expression and emitted '' for ascending fields.
                l.append(field + (' DESC' if desc else ''))
            order_by_clause = 'ORDER BY ' + ', '.join(l)
        where_clause = ''
        if filters:
            l = []  # noqa: E741
            for field, value in filters.items():
                l.append("{field} = '{value}'".format(**locals()))
            where_clause = 'WHERE ' + ' AND '.join(l)
        sql = textwrap.dedent("""\
            SHOW PARTITIONS FROM {table_name}
            {where_clause}
            {order_by_clause}
            {limit_clause}
        """).format(**locals())
        return sql

    @classmethod
    def _latest_partition_from_df(cls, df):
        # First column of the first row of the (already sorted) result.
        recs = df.to_records(index=False)
        if recs:
            return recs[0][0]

    @classmethod
    def latest_partition(cls, table_name, schema, database, show_first=False):
        """Returns col name and the latest (max) partition value for a table

        :param table_name: the name of the table
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database
        :param show_first: displays the value for the first partitioning key
            if there are many partitioning keys
        :type show_first: bool

        >>> latest_partition('foo_table')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        if len(indexes[0]['column_names']) < 1:
            raise SupersetTemplateException(
                "The table should have one partitioned field")
        elif not show_first and len(indexes[0]['column_names']) > 1:
            raise SupersetTemplateException(
                "The table should have a single partitioned field "
                "to use this function. You may want to use "
                "`presto.latest_sub_partition`")
        part_field = indexes[0]['column_names'][0]
        sql = cls._partition_query(table_name, 1, [(part_field, True)])
        df = database.get_df(sql, schema)
        return part_field, cls._latest_partition_from_df(df)

    @classmethod
    def latest_sub_partition(cls, table_name, schema, database, **kwargs):
        """Returns the latest (max) partition value for a table

        A filtering criteria should be passed for all fields that are
        partitioned except for the field to be returned. For example,
        if a table is partitioned by (``ds``, ``event_type`` and
        ``event_category``) and you want the latest ``ds``, you'll want
        to provide a filter as keyword arguments for both
        ``event_type`` and ``event_category`` as in
        ``latest_sub_partition('my_table',
            event_category='page', event_type='click')``

        :param table_name: the name of the table, can be just the table
            name or a fully qualified table name as ``schema_name.table_name``
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database
        :param kwargs: keyword arguments define the filtering criteria
            on the partition list. There can be many of these.
        :type kwargs: str

        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]['column_names']
        for k in kwargs.keys():
            # Bug fix: the previous ``if k not in k in part_fields`` chained
            # to ``(k not in k) and (k in part_fields)``, which is always
            # False, so invalid filter fields were silently accepted.
            if k not in part_fields:
                # Also actually interpolate the field name into the message
                # (the template was never .format()ed before).
                msg = ("Field [{k}] is not part of the partitioning "
                       "key").format(k=k)
                raise SupersetTemplateException(msg)
        if len(kwargs.keys()) != len(part_fields) - 1:
            msg = (
                "A filter needs to be specified for {} out of the "
                "{} fields."
            ).format(len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)
        for field in part_fields:
            if field not in kwargs.keys():
                field_to_return = field
        sql = cls._partition_query(
            table_name, 1, [(field_to_return, True)], kwargs)
        df = database.get_df(sql, schema)
        if df.empty:
            return ''
        return df.to_dict()[field_to_return][0]
class HiveEngineSpec(PrestoEngineSpec):
    """Reuses PrestoEngineSpec functionality."""
    engine = 'hive'
    # Passed through to pyhive's cursor.execute(); 'async' here is only a
    # string dict key (not the Python keyword), enabling asynchronous
    # execution so handle_cursor() can poll for progress.
    cursor_execute_kwargs = {'async': True}

    # Scoping regex at class level to avoid recompiling
    # 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5
    jobs_stats_r = re.compile(
        r'.*INFO.*Total jobs = (?P<max_jobs>[0-9]+)')
    # 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5
    launching_job_r = re.compile(
        '.*INFO.*Launching Job (?P<job_number>[0-9]+) out of '
        '(?P<max_jobs>[0-9]+)')
    # 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18
    # map = 0%,  reduce = 0%
    stage_progress_r = re.compile(
        r'.*INFO.*Stage-(?P<stage_number>[0-9]+).*'
        r'map = (?P<map_progress>[0-9]+)%.*'
        r'reduce = (?P<reduce_progress>[0-9]+)%.*')

    @classmethod
    def patch(cls):
        """Monkey-patch pyhive to use the patched Thrift service modules
        and Superset's own fetch_logs implementation."""
        from pyhive import hive
        from superset.db_engines import hive as patched_hive
        from pythrifthiveapi.TCLIService import (
            constants as patched_constants,
            ttypes as patched_ttypes,
            TCLIService as patched_TCLIService)
        hive.TCLIService = patched_TCLIService
        hive.constants = patched_constants
        hive.ttypes = patched_ttypes
        hive.Cursor.fetch_logs = patched_hive.fetch_logs

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        # Delegates to the base implementation; this override exists only
        # to attach the memoization decorator above.
        return BaseEngineSpec.fetch_result_sets(
            db, datasource_type, force=force)

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema=None):
        """Point the SQLAlchemy URI at the selected schema, if any."""
        if selected_schema:
            uri.database = selected_schema
        return uri

    @classmethod
    def progress(cls, log_lines):
        """Parse Hive log lines and return overall query progress as an
        integer percentage across all map/reduce jobs and stages."""
        total_jobs = 1  # assuming there's at least 1 job
        current_job = 1
        stages = {}
        for line in log_lines:
            match = cls.jobs_stats_r.match(line)
            if match:
                total_jobs = int(match.groupdict()['max_jobs']) or 1
            match = cls.launching_job_r.match(line)
            if match:
                current_job = int(match.groupdict()['job_number'])
                total_jobs = int(match.groupdict()['max_jobs']) or 1
                # A new job started: stage progress of the old job is stale.
                stages = {}
            match = cls.stage_progress_r.match(line)
            if match:
                stage_number = int(match.groupdict()['stage_number'])
                map_progress = int(match.groupdict()['map_progress'])
                reduce_progress = int(match.groupdict()['reduce_progress'])
                # A stage's progress is the mean of its map and reduce parts.
                stages[stage_number] = (map_progress + reduce_progress) / 2
        logging.info(
            "Progress detail: {}, "
            "current job {}, "
            "total jobs: {}".format(stages, current_job, total_jobs))
        stage_progress = sum(
            stages.values()) / len(stages.values()) if stages else 0
        # Completed jobs count as 100% each; the current job contributes
        # its average stage progress, both scaled by the total job count.
        progress = (
            100 * (current_job - 1) / total_jobs + stage_progress / total_jobs
        )
        return int(progress)

    @classmethod
    def get_tracking_url(cls, log_lines):
        """Return the Hadoop job tracking URL found in the logs, or None."""
        lkp = "Tracking URL = "
        for line in log_lines:
            if lkp in line:
                return line.split(lkp)[1]

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Updates progress information"""
        from pyhive import hive
        unfinished_states = (
            hive.ttypes.TOperationState.INITIALIZED_STATE,
            hive.ttypes.TOperationState.RUNNING_STATE,
        )
        polled = cursor.poll()
        last_log_line = 0
        tracking_url = None
        job_id = None
        while polled.operationState in unfinished_states:
            # Re-fetch the query row each pass to see external status changes
            # (e.g. a user-initiated stop).
            query = session.query(type(query)).filter_by(id=query.id).one()
            if query.status == QueryStatus.STOPPED:
                cursor.cancel()
                break
            log = cursor.fetch_logs() or ''
            if log:
                log_lines = log.splitlines()
                progress = cls.progress(log_lines)
                logging.info("Progress total: {}".format(progress))
                needs_commit = False
                # Progress only ever moves forward.
                if progress > query.progress:
                    query.progress = progress
                    needs_commit = True
                if not tracking_url:
                    tracking_url = cls.get_tracking_url(log_lines)
                    if tracking_url:
                        job_id = tracking_url.split('/')[-2]
                        logging.info(
                            "Found the tracking url: {}".format(tracking_url))
                        tracking_url = tracking_url_trans(tracking_url)
                        logging.info(
                            "Transformation applied: {}".format(tracking_url))
                        query.tracking_url = tracking_url
                        logging.info("Job id: {}".format(job_id))
                        needs_commit = True
                if job_id and len(log_lines) > last_log_line:
                    # Wait for job id before logging things out
                    # this allows for prefixing all log lines and becoming
                    # searchable in something like Kibana
                    for l in log_lines[last_log_line:]:
                        logging.info("[{}] {}".format(job_id, l))
                    last_log_line = len(log_lines)
                if needs_commit:
                    session.commit()
            time.sleep(5)
            polled = cursor.poll()

    @classmethod
    def where_latest_partition(
            cls, table_name, schema, database, qry, columns=None):
        """Restrict qry to the latest partition of the table; return the
        amended query, or False when the table is not partitioned or no
        matching column is found."""
        try:
            col_name, value = cls.latest_partition(
                table_name, schema, database)
        except Exception:
            # table is not partitioned
            return False
        for c in columns:
            if str(c.name) == str(col_name):
                return qry.where(c == str(value))
        return False

    @classmethod
    def latest_sub_partition(cls, table_name, schema, database, **kwargs):
        # TODO(bogdan): implement`
        pass

    @classmethod
    def _latest_partition_from_df(cls, df):
        """Hive partitions look like ds={partition name}"""
        # Take the max of the first column and strip the "key=" prefix.
        return df.ix[:, 0].max().split('=')[1]

    @classmethod
    def _partition_query(
            cls, table_name, limit=0, order_by=None, filters=None):
        # Hive's SHOW PARTITIONS takes no LIMIT/ORDER BY/WHERE; the extra
        # parameters are accepted only for interface parity with Presto.
        return "SHOW PARTITIONS {table_name}".format(**locals())
class MssqlEngineSpec(BaseEngineSpec):
    """Engine spec for Microsoft SQL Server."""
    engine = 'mssql'
    # NOTE(review): sibling specs (e.g. AthenaEngineSpec) define
    # epoch_to_dttm as a classmethod returning this string; here it is a
    # plain attribute — confirm callers handle both forms.
    epoch_to_dttm = "dateadd(S, {col}, '1970-01-01')"

    # MSSQL has no date_trunc; truncation is emulated with the classic
    # DATEADD(DATEDIFF(...)) round-trip for each grain.
    time_grains = (
        Grain("Time Column", _('Time Column'), "{col}"),
        Grain("second", _('second'), "DATEADD(second, "
              "DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')"),
        Grain("minute", _('minute'), "DATEADD(minute, "
              "DATEDIFF(minute, 0, {col}), 0)"),
        Grain("5 minute", _('5 minute'), "DATEADD(minute, "
              "DATEDIFF(minute, 0, {col}) / 5 * 5, 0)"),
        Grain("half hour", _('half hour'), "DATEADD(minute, "
              "DATEDIFF(minute, 0, {col}) / 30 * 30, 0)"),
        Grain("hour", _('hour'), "DATEADD(hour, "
              "DATEDIFF(hour, 0, {col}), 0)"),
        Grain("day", _('day'), "DATEADD(day, "
              "DATEDIFF(day, 0, {col}), 0)"),
        Grain("week", _('week'), "DATEADD(week, "
              "DATEDIFF(week, 0, {col}), 0)"),
        Grain("month", _('month'), "DATEADD(month, "
              "DATEDIFF(month, 0, {col}), 0)"),
        Grain("quarter", _('quarter'), "DATEADD(quarter, "
              "DATEDIFF(quarter, 0, {col}), 0)"),
        Grain("year", _('year'), "DATEADD(year, "
              "DATEDIFF(year, 0, {col}), 0)"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render dttm as a CONVERT literal; style 126 is ISO 8601."""
        return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat())
class RedshiftEngineSpec(PostgresEngineSpec):
    """Engine spec for Amazon Redshift; behavior is fully inherited from
    PostgresEngineSpec, only the engine name differs."""
    engine = 'redshift'
class OracleEngineSpec(PostgresEngineSpec):
    """Engine spec for Oracle; inherits Postgres behavior but overrides
    time grains and datetime literal conversion."""
    engine = 'oracle'

    # Oracle truncates dates with TRUNC and format-model codes.
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('minute', _('minute'),
              "TRUNC(TO_DATE({col}), 'MI')"),
        Grain('hour', _('hour'),
              "TRUNC(TO_DATE({col}), 'HH')"),
        Grain('day', _('day'),
              "TRUNC(TO_DATE({col}), 'DDD')"),
        Grain('week', _('week'),
              "TRUNC(TO_DATE({col}), 'WW')"),
        Grain('month', _('month'),
              "TRUNC(TO_DATE({col}), 'MONTH')"),
        Grain('quarter', _('quarter'),
              "TRUNC(TO_DATE({col}), 'Q')"),
        Grain('year', _('year'),
              "TRUNC(TO_DATE({col}), 'YEAR')"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render dttm as TO_TIMESTAMP; ff6 keeps microsecond precision to
        match datetime.isoformat()."""
        return (
            """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
        ).format(dttm.isoformat())
class VerticaEngineSpec(PostgresEngineSpec):
    """Engine spec for Vertica; behavior is fully inherited from
    PostgresEngineSpec, only the engine name differs."""
    engine = 'vertica'
class AthenaEngineSpec(BaseEngineSpec):
    """Engine spec for Amazon Athena (Presto-style SQL functions)."""
    engine = 'awsathena'

    # Columns are cast to TIMESTAMP before truncation so string/epoch
    # columns also work with date_trunc.
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('second', _('second'),
              "date_trunc('second', CAST({col} AS TIMESTAMP))"),
        Grain('minute', _('minute'),
              "date_trunc('minute', CAST({col} AS TIMESTAMP))"),
        Grain('hour', _('hour'),
              "date_trunc('hour', CAST({col} AS TIMESTAMP))"),
        Grain('day', _('day'),
              "date_trunc('day', CAST({col} AS TIMESTAMP))"),
        Grain('week', _('week'),
              "date_trunc('week', CAST({col} AS TIMESTAMP))"),
        Grain('month', _('month'),
              "date_trunc('month', CAST({col} AS TIMESTAMP))"),
        Grain('quarter', _('quarter'),
              "date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
        # Week boundaries shifted with date_add to end on Saturday /
        # start on Sunday respectively.
        Grain("week_ending_saturday", _('week_ending_saturday'),
              "date_add('day', 5, date_trunc('week', date_add('day', 1, "
              "CAST({col} AS TIMESTAMP))))"),
        Grain("week_start_sunday", _('week_start_sunday'),
              "date_add('day', -1, date_trunc('week', "
              "date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render dttm as an Athena literal matching the target type."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
        if tt == 'TIMESTAMP':
            return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
        # Fallback: explicit CAST of a plain datetime string.
        return ("CAST ('{}' AS TIMESTAMP)"
                .format(dttm.strftime('%Y-%m-%d %H:%M:%S')))

    @classmethod
    def epoch_to_dttm(cls):
        """Expression converting an epoch-seconds column to a timestamp."""
        return "from_unixtime({col})"
class ClickHouseEngineSpec(BaseEngineSpec):
    """Dialect for ClickHouse analytical DB."""
    engine = 'clickhouse'

    # NOTE(review): flags consumed by the query layer elsewhere in this
    # module — presumably enabling secondary time columns and inline time
    # group-bys for ClickHouse; confirm against BaseEngineSpec usage.
    time_secondary_columns = True
    time_groupby_inline = True

    # Columns are wrapped in toDateTime() so non-DateTime columns also
    # work with the toStartOf* truncation functions.
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('minute', _('minute'),
              "toStartOfMinute(toDateTime({col}))"),
        Grain('5 minute', _('5 minute'),
              "toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)"),
        Grain('10 minute', _('10 minute'),
              "toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)"),
        Grain('hour', _('hour'),
              "toStartOfHour(toDateTime({col}))"),
        Grain('day', _('day'),
              "toStartOfDay(toDateTime({col}))"),
        Grain('month', _('month'),
              "toStartOfMonth(toDateTime({col}))"),
        Grain('quarter', _('quarter'),
              "toStartOfQuarter(toDateTime({col}))"),
        Grain('year', _('year'),
              "toStartOfYear(toDateTime({col}))"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render dttm as a ClickHouse literal for the target column type."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "toDate('{}')".format(dttm.strftime('%Y-%m-%d'))
        if tt == 'DATETIME':
            return "toDateTime('{}')".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class BQEngineSpec(BaseEngineSpec):
    """Engine spec for Google's BigQuery
    As contributed by @mxmzdlv on issue #945"""
    engine = 'bigquery'

    # One TIMESTAMP_TRUNC expression per supported grain.
    time_grains = (
        Grain("Time Column", _('Time Column'), "{col}"),
        Grain("second", _('second'), "TIMESTAMP_TRUNC({col}, SECOND)"),
        Grain("minute", _('minute'), "TIMESTAMP_TRUNC({col}, MINUTE)"),
        Grain("hour", _('hour'), "TIMESTAMP_TRUNC({col}, HOUR)"),
        Grain("day", _('day'), "TIMESTAMP_TRUNC({col}, DAY)"),
        Grain("week", _('week'), "TIMESTAMP_TRUNC({col}, WEEK)"),
        Grain("month", _('month'), "TIMESTAMP_TRUNC({col}, MONTH)"),
        Grain("quarter", _('quarter'), "TIMESTAMP_TRUNC({col}, QUARTER)"),
        Grain("year", _('year'), "TIMESTAMP_TRUNC({col}, YEAR)"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render dttm as a quoted literal: date-only for DATE targets,
        a full datetime string for everything else."""
        wants_date_only = target_type.upper() == 'DATE'
        pattern = '%Y-%m-%d' if wants_date_only else '%Y-%m-%d %H:%M:%S'
        return "'{}'".format(dttm.strftime(pattern))
class ImpalaEngineSpec(BaseEngineSpec):
    """Engine spec for Cloudera's Impala"""
    engine = 'impala'

    # Impala truncates timestamps with TRUNC and Oracle-style format codes.
    time_grains = (
        Grain("Time Column", _('Time Column'), "{col}"),
        Grain("minute", _('minute'), "TRUNC({col}, 'MI')"),
        Grain("hour", _('hour'), "TRUNC({col}, 'HH')"),
        Grain("day", _('day'), "TRUNC({col}, 'DD')"),
        Grain("week", _('week'), "TRUNC({col}, 'WW')"),
        Grain("month", _('month'), "TRUNC({col}, 'MONTH')"),
        Grain("quarter", _('quarter'), "TRUNC({col}, 'Q')"),
        Grain("year", _('year'), "TRUNC({col}, 'YYYY')"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render dttm as a quoted literal, date-only for DATE targets."""
        is_date = target_type.upper() == 'DATE'
        fmt = '%Y-%m-%d' if is_date else '%Y-%m-%d %H:%M:%S'
        return "'{}'".format(dttm.strftime(fmt))
# Registry of all engine specs defined in this module, keyed by engine
# name. Built by introspection, so new spec classes register themselves
# automatically (BaseEngineSpec itself is included).
engines = {
    o.engine: o for o in globals().values()
    if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}
| {
"content_hash": "0f347b3011495ebaf0404c92227b9f8c",
"timestamp": "",
"source": "github",
"line_count": 1008,
"max_line_length": 79,
"avg_line_length": 37.6875,
"alnum_prop": 0.5501066098081023,
"repo_name": "nekia/incubator-superset-dev",
"id": "0b804b361305d05cc963366f574c6c8cfaba73c5",
"size": "37989",
"binary": false,
"copies": "1",
"ref": "refs/heads/extension_for_madlib",
"path": "superset/db_engine_specs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64602"
},
{
"name": "HTML",
"bytes": "101867"
},
{
"name": "JavaScript",
"bytes": "614450"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PLpgSQL",
"bytes": "5303"
},
{
"name": "Python",
"bytes": "748520"
},
{
"name": "Shell",
"bytes": "1046"
}
],
"symlink_target": ""
} |
from os.path import abspath, join, dirname

# Prefer setuptools; fall back to distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Packaging metadata for the Cassandra Cluster Manager (ccm).
setup(
    name='ccm',
    version='2.0.2',
    description='Cassandra Cluster Manager',
    # Use the README as long description, resolved relative to this file
    # so builds work from any working directory.
    long_description=open(abspath(join(dirname(__file__), 'README.md'))).read(),
    author='Sylvain Lebresne',
    author_email='sylvain@datastax.com',
    url='https://github.com/pcmanus/ccm',
    packages=['ccmlib', 'ccmlib.cmds'],
    scripts=['ccm'],
    install_requires=['pyYaml', 'six >=1.4.1'],
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ],
)
| {
"content_hash": "10f3526a5b1b8426f69437f53f708922",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 30.806451612903224,
"alnum_prop": 0.6167539267015707,
"repo_name": "spodkowinski/ccm",
"id": "bcf57c746b91170dbba9d9331d3ebda410b53f57",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "213607"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2020 Ad Schellevis <ad@opnsense.org>
Copyright (c) 2020 devNan0 <nan0@nan0.dev>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import datetime
from . import BaseLogFormat
# Matches the default FreeRADIUS log timestamp prefix, e.g.
# "Tue Sep 1 12:34:56 2020 : ..."; group(1) captures the leading
# "<weekday> <month> <day> <time> <year> :" portion.
freeradius_timeformat = r'^([A-Za-z]{3}\s[A-Za-z]{3}\s\d{1,2}\s\d{2}:\d{2}:\d{2}\s\d{4}\s[:]).*'
class FreeRADIUSLogFormat(BaseLogFormat):
    """Log format handler for FreeRADIUS log files."""

    def __init__(self, filename):
        super(FreeRADIUSLogFormat, self).__init__(filename)
        self._priority = 100

    def match(self, line):
        """A line is ours when the filename mentions 'radius' and the line
        begins with a FreeRADIUS timestamp."""
        if self._filename.find('radius') == -1:
            return False
        return re.match(freeradius_timeformat, line) is not None

    @staticmethod
    def timestamp(line):
        """Extract the leading timestamp and return it in ISO-8601 form."""
        prefix = re.match(freeradius_timeformat, line).group(1)
        parsed = datetime.datetime.strptime(prefix, "%a %b %d %H:%M:%S %Y :")
        return parsed.isoformat()

    @staticmethod
    def line(line):
        """Drop the fixed-width timestamp prefix, returning the message."""
        return line[26:].strip()
| {
"content_hash": "d742381dc7b50aea3b22362a7019a5db",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 104,
"avg_line_length": 43.02040816326531,
"alnum_prop": 0.709203036053131,
"repo_name": "dharrigan/plugins",
"id": "23fc0d739840dbda6eb693478f94e256fc7b8c1e",
"size": "2108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "net/freeradius/src/opnsense/scripts/systemhealth/logformats/freeradius.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1079180"
},
{
"name": "Makefile",
"bytes": "23141"
},
{
"name": "Mathematica",
"bytes": "286"
},
{
"name": "PHP",
"bytes": "1408469"
},
{
"name": "Perl",
"bytes": "3688"
},
{
"name": "Python",
"bytes": "32924"
},
{
"name": "Ruby",
"bytes": "61798"
},
{
"name": "Shell",
"bytes": "20679"
},
{
"name": "Volt",
"bytes": "487603"
}
],
"symlink_target": ""
} |
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.db import connection
from bluebottle.wallposts.models import MediaWallpost, TextWallpost
from bluebottle.common.tasks import _post_to_facebook
@receiver(post_save, sender=MediaWallpost)
@receiver(post_save, sender=TextWallpost)
def post_to_facebook(sender, instance, created, **kwargs):
    """On wallpost creation, schedule the async Facebook share task for
    posts the author chose to share, passing the current tenant along."""
    # The connection may not carry a tenant (e.g. outside a tenant schema).
    tenant = getattr(connection, 'tenant', None)

    should_share = created and instance.share_with_facebook
    if should_share:
        # Small countdown so the surrounding transaction can settle first.
        _post_to_facebook.apply_async(
            args=[instance],
            kwargs={'tenant': tenant},
            countdown=5
        )
| {
"content_hash": "621c89e41bfcf823614468dc40c59698",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 28,
"alnum_prop": 0.7038690476190477,
"repo_name": "jfterpstra/bluebottle",
"id": "5b7f46863b6a39ebec4d21ad6bf7376e06ff6104",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bluebottle/wallposts/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16556"
},
{
"name": "HTML",
"bytes": "173443"
},
{
"name": "JavaScript",
"bytes": "434"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "1694079"
},
{
"name": "Shell",
"bytes": "2951"
},
{
"name": "Smarty",
"bytes": "4317"
}
],
"symlink_target": ""
} |
"""A module for storage specific test cases"""
import testbase
from utils import *
log = get_logger('auto-cert-kit')
class PerfTestClass(testbase.LocalStorageTestClass):
    """A somewhat generic test class for local storage
    performance tests that could be expanded to include
    additional plugin-based tasks"""

    # Define the test timeout in seconds and the number of test VMs
    timeout = 3600
    vm_count = 3

    # SSH command variables
    username = 'root'
    password = DEFAULT_PASSWORD

    # Class variables: benchmark name and shell command to run, set by the
    # individual test_* entry points before calling _run_test().
    test = ''
    cmd_str = ''

    def _setup_vms(self, session):
        """Creates vm_count VMs on the
        master host's local SR"""
        host_ref = get_pool_master(session)
        net_ref = get_management_network(session)
        # Prefer the SR given in the device config; fall back to the first
        # local SR on the master host.
        if 'device_config' in self.config and 'sr' in self.config['device_config']:
            sr_ref = self.config['device_config']['sr']
        else:
            log.debug("Local SR info is not available from device tag.")
            log.debug("Choosing first local SR.")
            sr_ref = get_local_sr(session, host_ref)
        log.debug("%s is chosen for local storage test." % sr_ref)
        return deploy_count_droid_vms_on_host(session,
                                              host_ref,
                                              [net_ref],
                                              self.vm_count,
                                              {net_ref: self.get_static_manager(net_ref)},
                                              sr_ref)

    def _call_plugin(self, session, vm_ref_list, call):
        """Util function to call a XenAPI plugin"""
        res = []
        host = get_pool_master(session)
        for vm_ref in vm_ref_list:
            res.append(self.session.xenapi.host.call_plugin(host,
                                                            'autocertkit',
                                                            call,
                                                            {'vm_ref': vm_ref,
                                                             'username': self.username,
                                                             'password': self.password}))
        return res

    def _create_test_threads(self, session, vm_ref_list):
        """Spawns a new test thread using cmd_str in a
        timeout function over SSH to every VM in vm_ref_list"""
        threads = []
        for vm_ref in vm_ref_list:
            vm_ip = wait_for_ip(session, vm_ref, 'eth0')
            # NOTE(review): the lambda closes over vm_ip late — if the
            # thread body runs after the loop advances, all threads could
            # target the last VM; confirm create_test_thread's timing.
            threads.append(create_test_thread(lambda: TimeoutFunction(ssh_command(vm_ip,
                                                                                  self.username,
                                                                                  self.password,
                                                                                  self.cmd_str),
                                                                      self.timeout, '%s test timed out %d' % (self.test, self.timeout))))
        return threads

    def _run_test(self, session):
        """Run test function"""
        # setup vms
        vm_ref_list = self._setup_vms(session)
        # Make certain the VMs are available
        for vm_ref in vm_ref_list:
            check_vm_ping_response(session, vm_ref)
        # deploy test rpms
        self._call_plugin(session, vm_ref_list, 'deploy_' + self.test)
        # create, start test threads, wait until complete
        log.debug("About to run %s test..." % self.test)
        threads = self._create_test_threads(session, vm_ref_list)
        # Wait for the threads to finish running or timeout
        start = time.time()
        while check_test_thread_status(threads):
            time.sleep(1)
            if should_timeout(start, self.timeout):
                raise Exception("%s test timed out %s" % (self.test, self.timeout))
        # retrieve the logs
        log.debug("%s test is complete, retrieving logs" % self.test)
        res = self._call_plugin(session, vm_ref_list, 'retrieve_' + self.test + '_logs')
        return {'info': 'Test ran successfully'}

    def test_iozone(self, session):
        """Perform the IOZone Local Storage benchmark"""
        self.test = 'iozone'
        self.cmd_str = '/usr/bin/iozone -r 4k -r 128k -r 1m -s 128m >> /root/localhost.log'
        return self._run_test(session)

    def test_bonnie(self, session):
        """Perform the Bonnie++ local storage benchmark"""
        config = {'scratch_dir': '/root/bonnie',
                  'file_size': '2000',
                  'count': '1',
                  'user': 'citrix',
                  'log': '2>&1 | tee /root/localhost.log'}
        self.test = 'bonnie'
        self.cmd_str = 'bonnie++ -d %s -s %s -x %s -u %s %s' % (config['scratch_dir'],
                                                                config['file_size'],
                                                                config['count'],
                                                                config['user'],
                                                                config['log'])
        return self._run_test(session)
| {
"content_hash": "10a96b069baa61fb577e462d7288d60a",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 137,
"avg_line_length": 44.19166666666667,
"alnum_prop": 0.46351122006411466,
"repo_name": "bpwook/auto-cert-kit",
"id": "19c585f3a6ea655058201b42217db9dabda9861f",
"size": "6692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "autocertkit/storage_tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "6494"
},
{
"name": "Python",
"bytes": "380986"
},
{
"name": "Shell",
"bytes": "1410"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import
import os, sys, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
try:
from os import setgroups, getgroups
except ImportError:
setgroups = getgroups = None
from twisted.python.compat import _PY3, unicode
if _PY3:
UserDict = object
else:
from UserDict import UserDict
class InsensitiveDict:
    """Dictionary, that has case-insensitive keys.
    Normally keys are retained in their original form when queried with
    .keys() or .items(). If initialized with preserveCase=0, keys are both
    looked up in lowercase and returned in lowercase by .keys() and .items().
    """
    """
    Modified recipe at
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
    contributed by Sami Hangaslammi.
    """
    # Internal representation: self.data maps the lowercased key (for
    # string keys) to a (original_key, value) tuple.

    def __init__(self, dict=None, preserve=1):
        """Create an empty dictionary, or update from 'dict'."""
        self.data = {}
        self.preserve=preserve
        if dict:
            self.update(dict)

    def __delitem__(self, key):
        # Delete through the case-normalized key.
        k=self._lowerOrReturn(key)
        del self.data[k]

    def _lowerOrReturn(self, key):
        # Lowercase string keys (bytes or text); other types pass through.
        if isinstance(key, bytes) or isinstance(key, unicode):
            return key.lower()
        else:
            return key

    def __getitem__(self, key):
        """Retrieve the value associated with 'key' (in any case)."""
        k = self._lowerOrReturn(key)
        return self.data[k][1]

    def __setitem__(self, key, value):
        """Associate 'value' with 'key'. If 'key' already exists, but
        in different case, it will be replaced."""
        k = self._lowerOrReturn(key)
        self.data[k] = (key, value)

    def has_key(self, key):
        """Case insensitive test whether 'key' exists."""
        k = self._lowerOrReturn(key)
        return k in self.data

    # Membership testing reuses the case-insensitive lookup.
    __contains__=has_key

    def _doPreserve(self, key):
        # Return the key lowercased unless original case is preserved.
        if not self.preserve and (isinstance(key, bytes)
                                  or isinstance(key, unicode)):
            return key.lower()
        else:
            return key

    def keys(self):
        """List of keys in their original case."""
        return list(self.iterkeys())

    def values(self):
        """List of values."""
        return list(self.itervalues())

    def items(self):
        """List of (key,value) pairs."""
        return list(self.iteritems())

    def get(self, key, default=None):
        """Retrieve value associated with 'key' or return default value
        if 'key' doesn't exist."""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default):
        """If 'key' doesn't exists, associate it with the 'default' value.
        Return value associated with 'key'."""
        if not self.has_key(key):
            self[key] = default
        return self[key]

    def update(self, dict):
        """Copy (key,value) pairs from 'dict'."""
        for k,v in dict.items():
            self[k] = v

    def __repr__(self):
        """String representation of the dictionary."""
        items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
        return "InsensitiveDict({%s})" % items

    def iterkeys(self):
        # Yield stored original keys, normalized per the preserve flag.
        for v in self.data.values():
            yield self._doPreserve(v[0])

    def itervalues(self):
        # Yield values only.
        for v in self.data.values():
            yield v[1]

    def iteritems(self):
        # Yield (key, value) pairs, key normalized per the preserve flag.
        for (k, v) in self.data.values():
            yield self._doPreserve(k), v

    def popitem(self):
        # Remove and return the first listed (key, value) pair.
        i=self.items()[0]
        del self[i[0]]
        return i

    def clear(self):
        """Remove all items."""
        for k in self.keys():
            del self[k]

    def copy(self):
        """Shallow copy, preserving the case-preservation flag."""
        return InsensitiveDict(self, self.preserve)

    def __len__(self):
        return len(self.data)

    def __eq__(self, other):
        # Py2-style equality returning 0/1: same length, all items equal.
        for k,v in self.items():
            if not (k in other) or not (other[k]==v):
                return 0
        return len(self)==len(other)
class OrderedDict(UserDict):
    """A UserDict that preserves insert order whenever possible."""
    # NOTE(review): under Python 3 the compat fallback above binds
    # UserDict to `object`, so the inherited mapping behavior here is
    # Python-2-only — confirm before relying on it under Py3.

    def __init__(self, dict=None, **kwargs):
        # _order tracks key insertion order; data holds the mapping.
        self._order = []
        self.data = {}
        if dict is not None:
            if hasattr(dict,'keys'):
                self.update(dict)
            else:
                for k,v in dict: # sequence
                    self[k] = v
        if len(kwargs):
            self.update(kwargs)

    def __repr__(self):
        return '{'+', '.join([('%r: %r' % item) for item in self.items()])+'}'

    def __setitem__(self, key, value):
        # Record the key's position only on first insertion.
        if not self.has_key(key):
            self._order.append(key)
        UserDict.__setitem__(self, key, value)

    def copy(self):
        """Shallow copy preserving insertion order."""
        return self.__class__(self)

    def __delitem__(self, key):
        UserDict.__delitem__(self, key)
        self._order.remove(key)

    def iteritems(self):
        # Iterate pairs in insertion order.
        for item in self._order:
            yield (item, self[item])

    def items(self):
        return list(self.iteritems())

    def itervalues(self):
        # Iterate values in insertion order.
        for item in self._order:
            yield self[item]

    def values(self):
        return list(self.itervalues())

    def iterkeys(self):
        return iter(self._order)

    def keys(self):
        return list(self._order)

    def popitem(self):
        """Remove and return the most recently inserted (key, value)."""
        key = self._order[-1]
        value = self[key]
        del self[key]
        return (key, value)

    def setdefault(self, item, default):
        """Return item's value, inserting default first when absent."""
        if self.has_key(item):
            return self[item]
        self[item] = default
        return default

    def update(self, d):
        """Copy (key, value) pairs from d, preserving first-seen order."""
        for k, v in d.items():
            self[k] = v
def uniquify(lst):
    """Make the elements of a list unique by inserting them into a dictionary.
    This must not change the order of the input lst.
    """
    seen = {}
    unique = []
    for element in lst:
        if element in seen:
            continue
        seen[element] = 1
        unique.append(element)
    return unique
def padTo(n, seq, default=None):
    """
    Pads a sequence out to n elements,
    filling in with a default value if it is not long enough.
    If the input sequence is longer than n, raises ValueError.
    Details, details:
    This returns a new list; it does not extend the original sequence.
    The new list contains the values of the original sequence, not copies.
    """
    if len(seq) > n:
        raise ValueError("%d elements is more than %d." % (len(seq), n))
    # Copy the sequence, then append however many defaults are missing.
    padded = list(seq)
    padded.extend([default] * (n - len(padded)))
    return padded
def getPluginDirs():
    """Deprecated: return candidate Twisted plugin directories
    (system, user, and conf locations) that exist on disk."""
    warnings.warn(
        "twisted.python.util.getPluginDirs is deprecated since Twisted 12.2.",
        DeprecationWarning, stacklevel=2)
    import twisted
    twistedBase = os.path.dirname(
        os.path.dirname(os.path.abspath(twisted.__file__)))
    candidates = [
        os.path.join(twistedBase, 'plugins'),
        os.path.expanduser("~/TwistedPlugins"),
        os.path.expanduser("~/.twisted"),
    ]
    return filter(os.path.isdir, candidates)
def addPluginDir():
    """Deprecated: extend sys.path with the plugin directories returned
    by getPluginDirs()."""
    warnings.warn(
        "twisted.python.util.addPluginDir is deprecated since Twisted 12.2.",
        DeprecationWarning, stacklevel=2)
    sys.path.extend(getPluginDirs())
def sibpath(path, sibling):
    """
    Return the path to a sibling of a file in the filesystem.
    This is useful in conjunction with the special C{__file__} attribute
    that Python provides for modules, so modules can load associated
    resource files.
    """
    parent = os.path.dirname(os.path.abspath(path))
    return os.path.join(parent, sibling)
def _getpass(prompt):
    """
    Prompt via getpass, converting interrupted or closed input streams
    into KeyboardInterrupt.
    """
    import getpass
    try:
        return getpass.getpass(prompt)
    except EOFError:
        raise KeyboardInterrupt
    except IOError as e:
        # Only EINTR maps to KeyboardInterrupt; other IO errors propagate.
        if e.errno != errno.EINTR:
            raise
        raise KeyboardInterrupt
def getPassword(prompt = 'Password: ', confirm = 0, forceTTY = 0,
                confirmPrompt = 'Confirm password: ',
                mismatchMessage = "Passwords don't match."):
    """Obtain a password by prompting or from stdin.
    If stdin is a terminal, prompt for a new password, and confirm (if
    C{confirm} is true) by asking again to make sure the user typed the same
    thing, as keystrokes will not be echoed.
    If stdin is not a terminal, and C{forceTTY} is not true, read in a line
    and use it as the password, less the trailing newline, if any. If
    C{forceTTY} is true, attempt to open a tty and prompt for the password
    using it. Raise a RuntimeError if this is not possible.
    @returns: C{str}
    """
    isaTTY = hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
    old = None
    try:
        if not isaTTY:
            if forceTTY:
                # Temporarily rebind stdin/stdout to the controlling tty
                # so getpass can disable echo; restored in the finally.
                try:
                    old = sys.stdin, sys.stdout
                    sys.stdin = sys.stdout = open('/dev/tty', 'r+')
                except:
                    raise RuntimeError("Cannot obtain a TTY")
            else:
                # Non-interactive: take one line from stdin verbatim,
                # minus a trailing newline.
                password = sys.stdin.readline()
                if password[-1] == '\n':
                    password = password[:-1]
                return password
        # Interactive: prompt (and optionally confirm) until they match.
        while 1:
            try1 = _getpass(prompt)
            if not confirm:
                return try1
            try2 = _getpass(confirmPrompt)
            if try1 == try2:
                return try1
            else:
                sys.stderr.write(mismatchMessage + "\n")
    finally:
        if old:
            sys.stdin.close()
            sys.stdin, sys.stdout = old
def println(*a):
    """Write the space-joined str() of each argument to stdout,
    followed by a newline."""
    rendered = [str(item) for item in a]
    sys.stdout.write(' '.join(rendered) + '\n')
# XXX
# This does not belong here
# But where does it belong?
def str_xor(s, b):
    """Return s with each character's code point XOR'd with integer b."""
    return ''.join(chr(ord(ch) ^ b) for ch in s)
def makeStatBar(width, maxPosition, doneChar = '=', undoneChar = '-', currentChar = '>'):
    """
    Creates a function that will return a string representing a progress bar.
    """
    # Scale factor mapping a position in 0..maxPosition to bar characters.
    aValue = width / float(maxPosition)
    def statBar(position, force = 0, last = ['']):
        # `last` is a deliberate mutable default: it persists across calls
        # as closure state holding the previous rendering, so an unchanged
        # bar returns '' unless force is true.
        assert len(last) == 1, "Don't mess with the last parameter."
        done = int(aValue * position)
        toDo = width - done - 2
        result = "[%s%s%s]" % (doneChar * done, currentChar, undoneChar * toDo)
        if force:
            last[0] = result
            return result
        if result == last[0]:
            return ''
        last[0] = result
        return result
    # Build the returned function's docstring dynamically so it reflects
    # the configured characters, width, and range.
    statBar.__doc__ = """statBar(position, force = 0) -> '[%s%s%s]'-style progress bar
    returned string is %d characters long, and the range goes from 0..%d.
    The 'position' argument is where the '%s' will be drawn. If force is false,
    '' will be returned instead if the resulting progress bar is identical to the
    previously returned progress bar.
""" % (doneChar * 3, currentChar, undoneChar * 3, width, maxPosition, currentChar)
    return statBar
def spewer(frame, s, ignored):
    """
    A trace function for sys.settrace that prints every function or
    method call.

    @param frame: the frame being traced (only f_locals and f_code are
        inspected).
    @param s: unused (part of the sys.settrace signature).
    @param ignored: unused (part of the sys.settrace signature).
    """
    if 'self' in frame.f_locals:
        # BUGFIX: dict.has_key() does not exist on Python 3; use `in`.
        # The twisted import is also deferred to this branch so the
        # function-call path has no hard dependency on it.
        from twisted.python import reflect
        se = frame.f_locals['self']
        if hasattr(se, '__class__'):
            k = reflect.qual(se.__class__)
        else:
            k = reflect.qual(type(se))
        print('method %s of %s at %s' % (
            frame.f_code.co_name, k, id(se)))
    else:
        print('function %s in %s, line %s' % (
            frame.f_code.co_name,
            frame.f_code.co_filename,
            frame.f_lineno))
def searchupwards(start, files=(), dirs=()):
    """
    Walk upwards from start, looking for a directory containing
    all files and directories given as arguments::
    >>> searchupwards('.', ['foo.txt'], ['bar', 'bam'])
    If not found, return None

    @param start: the path to start searching from
    @param files: file names the target directory must contain
    @param dirs: directory names the target directory must contain
    @return: the matching ancestor path (with a trailing separator), or None
    """
    # Tuples as defaults avoid the shared-mutable-default pitfall of the
    # original [] defaults; any iterable of names still works.
    start = os.path.abspath(start)
    parents = start.split(os.sep)
    while parents:
        candidate = os.sep.join(parents) + os.sep
        hasFiles = all(os.path.exists("%s%s" % (candidate, f)) for f in files)
        if hasFiles and all(os.path.isdir("%s%s" % (candidate, d)) for d in dirs):
            return candidate
        parents.pop(-1)
    return None
class LineLog:
    """
    A limited-size line-based log, useful for logging line-based
    protocols such as SMTP.
    When the log fills up, old entries drop off the end.
    """
    def __init__(self, size=10):
        """
        Create a new log, with size lines of storage (default 10).
        A log size of 0 (or less) means an infinite log.
        """
        if size < 0:
            size = 0
        self.log = [None]*size
        self.size = size
    def append(self, line):
        if self.size:
            # Fixed capacity: shift left, overwrite the tail slot.
            self.log[:-1] = self.log[1:]
            self.log[-1] = line
        else:
            # Infinite log: just grow.
            self.log.append(line)
    def str(self):
        """Return the logged lines joined with newlines, skipping empty slots."""
        return '\n'.join(filter(None, self.log))
    def __getitem__(self, item):
        # Materialize the filter before indexing: on Python 3 filter()
        # returns an iterator, so the original `filter(None, self.log)[item]`
        # raised TypeError.
        return list(filter(None, self.log))[item]
    def clear(self):
        """Empty the log"""
        self.log = [None]*self.size
def raises(exception, f, *args, **kwargs):
    """
    Call C{f(*args, **kwargs)} and report whether it raised C{exception}.

    Returns 1 if the expected exception was raised, 0 if the call completed
    normally; any other exception propagates to the caller.
    """
    try:
        f(*args, **kwargs)
    except exception:
        return 1
    else:
        return 0
class IntervalDifferential:
    """
    Produce the amounts of time to sleep between "instants" drawn from a
    collection of repeating intervals.

    For example, given 7, 11 and 13, the three (infinite) sequences::

        7 14 21 28 35 ...
        11 22 33 44 ...
        13 26 39 52 ...

    will be generated, merged, and used to produce::

        (7, 0) (4, 1) (2, 2) (1, 0) (7, 0) (1, 1) (4, 2) (2, 0) (5, 1) (2, 0)

    New intervals may be added or removed as iteration proceeds using the
    proper methods.
    """
    def __init__(self, intervals, default=60):
        """
        @type intervals: C{list} of C{int}, C{long}, or C{float} param
        @param intervals: The intervals between instants.
        @type default: C{int}, C{long}, or C{float}
        @param default: The duration to generate if the intervals list
        becomes empty.
        """
        # Keep a private copy so later mutation of the caller's list has
        # no effect on this object.
        self.intervals = intervals[:]
        self.default = default

    def __iter__(self):
        return _IntervalDifferentialIterator(self.intervals, self.default)
class _IntervalDifferentialIterator:
def __init__(self, i, d):
self.intervals = [[e, e, n] for (e, n) in zip(i, range(len(i)))]
self.default = d
self.last = 0
def next(self):
if not self.intervals:
return (self.default, None)
last, index = self.intervals[0][0], self.intervals[0][2]
self.intervals[0][0] += self.intervals[0][1]
self.intervals.sort()
result = last - self.last
self.last = last
return result, index
def addInterval(self, i):
if self.intervals:
delay = self.intervals[0][0] - self.intervals[0][1]
self.intervals.append([delay + i, i, len(self.intervals)])
self.intervals.sort()
else:
self.intervals.append([i, i, 0])
def removeInterval(self, interval):
for i in range(len(self.intervals)):
if self.intervals[i][1] == interval:
index = self.intervals[i][2]
del self.intervals[i]
for i in self.intervals:
if i[2] > index:
i[2] -= 1
return
raise ValueError("Specified interval not in IntervalDifferential")
class FancyStrMixin:
    """
    Mixin providing a flexible implementation of C{__str__}.

    Output starts with the class name, or C{fancybasename} if that attribute
    is set (and truthy). The body is driven by C{showAttributes}, a sequence
    whose entries may be: a plain attribute name (rendered with C{%r}); a
    C{(attributeName, callable)} pair, where the callable receives the value
    and returns the text to show; or a
    C{(attributeName, displayName, formatCharacter)} triple, where the value
    is looked up via C{attributeName} but shown as C{displayName} rendered
    with C{formatCharacter} (e.g. C{"%.3f"} for a float).
    """
    # Override in subclasses:
    showAttributes = ()

    def __str__(self):
        parts = ['<', getattr(self, 'fancybasename', None) or self.__class__.__name__]
        for spec in self.showAttributes:
            if isinstance(spec, str):
                parts.append(' %s=%r' % (spec, getattr(self, spec)))
            elif len(spec) == 2:
                name, renderer = spec
                parts.append(' %s=' % (name,) + renderer(getattr(self, name)))
            else:
                attrName, display, fmt = spec
                parts.append((' %s=' + fmt) % (display, getattr(self, attrName)))
        parts.append('>')
        return ''.join(parts)
    __repr__ = __str__
class FancyEqMixin:
    """
    Mixin that implements C{__eq__} and C{__ne__} by comparing the
    attributes named in C{compareAttributes}.

    With an empty C{compareAttributes}, comparison falls back to identity.
    """
    compareAttributes = ()

    def __eq__(self, other):
        names = self.compareAttributes
        if not names:
            return self is other
        if not isinstance(self, other.__class__):
            return NotImplemented
        mine = [getattr(self, name) for name in names]
        theirs = [getattr(other, name) for name in names]
        return mine == theirs

    def __ne__(self, other):
        equality = self.__eq__(other)
        if equality is NotImplemented:
            return equality
        return not equality
try:
# Python 2.7 / Python 3.3
from os import initgroups as _c_initgroups
except ImportError:
try:
# Python 2.6
from twisted.python._initgroups import initgroups as _c_initgroups
except ImportError:
_c_initgroups = None
if pwd is None or grp is None or setgroups is None or getgroups is None:
    def initgroups(uid, primaryGid):
        """
        Do nothing.

        Underlying platform support required to manipulate groups is missing.
        """
else:
    # Fallback to the inefficient Python version
    def _setgroups_until_success(l):
        """
        Call C{setgroups(l)}, dropping trailing GIDs until the call succeeds.

        NASTY NASTY HACK (but glibc does it so it must be okay): in case
        sysconf didn't give the right answer, find the limit on max groups
        by just looping, trying to set fewer and fewer groups each time
        until it succeeds.
        """
        while True:
            try:
                setgroups(l)
            except ValueError:
                # This exception comes from python itself restricting
                # number of groups allowed.
                if len(l) > 1:
                    del l[-1]
                else:
                    raise
            except OSError as e:
                if e.errno == errno.EINVAL and len(l) > 1:
                    # This comes from the OS saying too many groups
                    del l[-1]
                else:
                    raise
            else:
                # Success, yay!
                return

    def initgroups(uid, primaryGid):
        """
        Initializes the group access list.

        If the C extension is present, we're calling it, which in turn calls
        initgroups(3).

        If not, this is done by reading the group database /etc/group and using
        all groups of which C{uid} is a member. The additional group
        C{primaryGid} is also added to the list.

        If the given user is a member of more than C{NGROUPS}, arbitrary
        groups will be silently discarded to bring the number below that
        limit.

        @type uid: C{int}
        @param uid: The UID for which to look up group information.

        @type primaryGid: C{int} or C{NoneType}
        @param primaryGid: If provided, an additional GID to include when
            setting the groups.
        """
        if _c_initgroups is not None:
            return _c_initgroups(pwd.getpwuid(uid)[0], primaryGid)
        try:
            # Try to get the maximum number of groups
            max_groups = os.sysconf("SC_NGROUPS_MAX")
        except (AttributeError, ValueError, OSError):
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  These are the exceptions
            # os.sysconf can raise when the name is unknown or the call
            # is unsupported; treat all of them as "no predefined limit".
            max_groups = 0

        username = pwd.getpwuid(uid)[0]
        l = []
        if primaryGid is not None:
            l.append(primaryGid)
        for groupname, password, gid, userlist in grp.getgrall():
            if username in userlist:
                l.append(gid)
                if len(l) == max_groups:
                    break # No more groups, ignore any more
        try:
            _setgroups_until_success(l)
        except OSError as e:
            # We might be able to remove this code now that we
            # don't try to setgid/setuid even when not asked to.
            if e.errno == errno.EPERM:
                for g in getgroups():
                    if g not in l:
                        raise
            else:
                raise
def switchUID(uid, gid, euid=False):
    """
    Attempts to switch the uid/euid and gid/egid for the current process.

    If C{uid} is the same value as L{os.getuid} (or L{os.geteuid}),
    this function will issue a L{UserWarning} and not raise an exception.

    @type uid: C{int} or C{NoneType}
    @param uid: the UID (or EUID) to switch the current process to. This
        parameter will be ignored if the value is C{None}.

    @type gid: C{int} or C{NoneType}
    @param gid: the GID (or EGID) to switch the current process to. This
        parameter will be ignored if the value is C{None}.

    @type euid: C{bool}
    @param euid: if True, set only effective user-id rather than real user-id.
        (This option has no effect unless the process is running as root, in
        which case it means not to shed all privileges, retaining the option
        to regain privileges in cases such as spawning processes. Use with
        caution.)
    """
    # Select the effective or the real variants of the os-level setters.
    if euid:
        setuid = os.seteuid
        setgid = os.setegid
        getuid = os.geteuid
    else:
        setuid = os.setuid
        setgid = os.setgid
        getuid = os.getuid
    if gid is not None:
        setgid(gid)
    if uid is None:
        return
    if uid == getuid():
        # Already running as the requested uid: warn instead of failing.
        uidText = "euid" if euid else "uid"
        actionText = "tried to drop privileges and set%s %s" % (uidText, uid)
        problemText = "%s is already %s" % (uidText, getuid())
        warnings.warn("%s but %s; should we be root? Continuing."
                      % (actionText, problemText))
    else:
        initgroups(uid, gid)
        setuid(uid)
class SubclassableCStringIO(object):
    """
    A wrapper around cStringIO to allow for subclassing.

    Every method simply delegates to the wrapped C{cStringIO.StringIO}
    instance.  NOTE: C{cStringIO} exists only on Python 2, so instantiation
    raises ImportError on Python 3 (this class is also pruned from the
    module namespace under Python 3; see the C{_PY3} block at module end).
    """
    # The wrapped cStringIO.StringIO instance (set in __init__).
    __csio = None
    def __init__(self, *a, **kw):
        # Imported lazily; cStringIO is a Python 2-only module.
        from cStringIO import StringIO
        self.__csio = StringIO(*a, **kw)
    def __iter__(self):
        return self.__csio.__iter__()
    def next(self):
        return self.__csio.next()
    def close(self):
        return self.__csio.close()
    def isatty(self):
        return self.__csio.isatty()
    def seek(self, pos, mode=0):
        return self.__csio.seek(pos, mode)
    def tell(self):
        return self.__csio.tell()
    def read(self, n=-1):
        return self.__csio.read(n)
    def readline(self, length=None):
        return self.__csio.readline(length)
    def readlines(self, sizehint=0):
        return self.__csio.readlines(sizehint)
    def truncate(self, size=None):
        return self.__csio.truncate(size)
    def write(self, s):
        return self.__csio.write(s)
    def writelines(self, list):
        return self.__csio.writelines(list)
    def flush(self):
        return self.__csio.flush()
    def getvalue(self):
        return self.__csio.getvalue()
def untilConcludes(f, *a, **kw):
    """
    Invoke C{f} with the given arguments, retrying whenever the call is
    interrupted by C{EINTR}.

    @param f: A function to call.
    @param *a: Positional arguments to pass to C{f}.
    @param **kw: Keyword arguments to pass to C{f}.

    @return: Whatever C{f} returns.
    @raise: Whatever C{f} raises, except for C{IOError} or C{OSError} with
        C{errno} set to C{EINTR}.
    """
    while True:
        try:
            return f(*a, **kw)
        except (IOError, OSError) as e:
            if e.args[0] != errno.EINTR:
                raise
def mergeFunctionMetadata(f, g):
    """
    Copy C{f}'s name, docstring, instance dictionary and module onto C{g}.

    Each copy is best-effort: attributes that cannot be assigned are
    silently skipped.

    @return: A function that has C{g}'s behavior and metadata merged from
        C{f}.
    """
    def _copy(attribute, tolerated):
        try:
            setattr(g, attribute, getattr(f, attribute))
        except tolerated:
            pass
    _copy('__name__', TypeError)
    _copy('__doc__', (TypeError, AttributeError))
    try:
        g.__dict__.update(f.__dict__)
    except (TypeError, AttributeError):
        pass
    _copy('__module__', TypeError)
    return g
def nameToLabel(mname):
    """
    Convert a string like a variable name into a slightly more human-friendly
    string with spaces and capitalized letters.

    @type mname: C{str}
    @param mname: The name to convert to a label. This must be a string
        which could be used as a Python identifier. Strings which do not take
        this form will result in unpredictable behavior.

    @rtype: C{str}
    """
    words = []
    current = ''
    prevUpper = False
    for ch in mname:
        isUpper = ch.isupper()
        if isUpper == prevUpper:
            # Same case as the previous letter: the current word continues.
            current += ch
        elif not prevUpper:
            # lower -> upper transition: unambiguous word boundary.
            words.append(current)
            current = ch
        elif len(current) == 1:
            # upper -> lower right after a single capital: this is simply
            # the start of a capitalized word.
            current += ch
        else:
            # upper -> lower after several capitals: an acronym followed by
            # the first capital of the next word.
            words.append(current[:-1])
            current = current[-1] + ch
        prevUpper = isUpper
    if not words:
        return mname.capitalize()
    words[0] = words[0].capitalize()
    words.append(current)
    return ' '.join(words)
def uidFromString(uidString):
    """
    Convert a user identifier, as a string, into an integer UID.

    @type uidString: C{str}
    @param uidString: A string giving the base-ten representation of a UID
        or the name of a user which can be converted to a UID via
        L{pwd.getpwnam}.

    @rtype: C{int}
    @return: The integer UID corresponding to the given string.

    @raise ValueError: If the user name is supplied and L{pwd} is not
        available.
    """
    try:
        uid = int(uidString)
    except ValueError:
        # Not numeric: fall back to a user-name lookup, if pwd exists.
        if pwd is None:
            raise
        uid = pwd.getpwnam(uidString)[2]
    return uid
def gidFromString(gidString):
    """
    Convert a group identifier, as a string, into an integer GID.

    @type gidString: C{str}
    @param gidString: A string giving the base-ten representation of a GID
        or the name of a group which can be converted to a GID via
        L{grp.getgrnam}.

    @rtype: C{int}
    @return: The integer GID corresponding to the given string.

    @raise ValueError: If the group name is supplied and L{grp} is not
        available.
    """
    try:
        gid = int(gidString)
    except ValueError:
        # Not numeric: fall back to a group-name lookup, if grp exists.
        if grp is None:
            raise
        gid = grp.getgrnam(gidString)[2]
    return gid
def runAsEffectiveUser(euid, egid, function, *args, **kwargs):
    """
    Run the given function wrapped with seteuid/setegid calls.

    This will try to minimize the number of seteuid/setegid calls, comparing
    current and wanted permissions

    @param euid: effective UID used to call the function.
    @type euid: C{int}

    @type egid: effective GID used to call the function.
    @param egid: C{int}

    @param function: the function run with the specific permission.
    @type function: any callable

    @param *args: arguments passed to C{function}
    @param **kwargs: keyword arguments passed to C{function}
    """
    uid, gid = os.geteuid(), os.getegid()
    if uid == euid and gid == egid:
        # Already running with the requested identity; no switching needed.
        return function(*args, **kwargs)
    else:
        if uid != 0 and (uid != euid or gid != egid):
            # Must regain root first to be allowed to change euid/egid.
            os.seteuid(0)
        if gid != egid:
            os.setegid(egid)
        if euid != 0 and (euid != uid or gid != egid):
            os.seteuid(euid)
        try:
            return function(*args, **kwargs)
        finally:
            # Restore the original identity in reverse order: regain root
            # (if needed), restore the egid, then drop back to the
            # original euid.
            if euid != 0 and (uid != euid or gid != egid):
                os.seteuid(0)
            if gid != egid:
                os.setegid(gid)
            if uid != 0 and (uid != euid or gid != egid):
                os.seteuid(uid)
def runWithWarningsSuppressed(suppressedWarnings, f, *args, **kwargs):
    """
    Run C{f(*args, **kwargs)} with some warning filters temporarily installed.

    Unlike L{twisted.internet.utils.runWithWarningsSuppressed}, it has no
    special support for L{twisted.internet.defer.Deferred}.

    @param suppressedWarnings: A list of arguments to pass to filterwarnings.
        Must be a sequence of 2-tuples (args, kwargs).
    @param f: A callable.
    @param args: Arguments for C{f}.
    @param kwargs: Keyword arguments for C{f}

    @return: The result of C{f(*args, **kwargs)}.
    """
    with warnings.catch_warnings():
        for filterArgs, filterKwargs in suppressedWarnings:
            warnings.filterwarnings(*filterArgs, **filterKwargs)
        return f(*args, **kwargs)
# Names exported by this module (full Python 2 set; pruned below on Python 3).
__all__ = [
    "uniquify", "padTo", "getPluginDirs", "addPluginDir", "sibpath",
    "getPassword", "println", "makeStatBar", "OrderedDict",
    "InsensitiveDict", "spewer", "searchupwards", "LineLog",
    "raises", "IntervalDifferential", "FancyStrMixin", "FancyEqMixin",
    "switchUID", "SubclassableCStringIO", "mergeFunctionMetadata",
    "nameToLabel", "uidFromString", "gidFromString", "runAsEffectiveUser",
    "untilConcludes",
    "runWithWarningsSuppressed",
]
if _PY3:
    # Only this subset has been ported to Python 3; everything else is
    # removed from both __all__ and the module globals so it cannot be
    # imported there.
    __all3__ = ["FancyEqMixin", "untilConcludes",
                "runWithWarningsSuppressed", "FancyStrMixin", "nameToLabel",
                "InsensitiveDict"]
    for name in __all__[:]:
        if name not in __all3__:
            __all__.remove(name)
            del globals()[name]
    del name, __all3__
| {
"content_hash": "2c275ca2eb4c57a6264e7963af21ead8",
"timestamp": "",
"source": "github",
"line_count": 1042,
"max_line_length": 90,
"avg_line_length": 30.74184261036468,
"alnum_prop": 0.5528673555395998,
"repo_name": "timkrentz/SunTracker",
"id": "950fd6b35ee2e1e61cb20fefaba3a8ce9239b178",
"size": "32164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/python/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
from . import mtapi
# Route identifier passed to every mtapi call in this module; named for
# the Green Line.
GREENLINE = 902
def station_code_to_name_map():
    """Invert gather_station_codes(): map each stop code to its station name."""
    return {
        code: station
        for station, codes in gather_station_codes().items()
        for code in codes
    }
def gather_station_codes():
    """Return a {station name: [stop codes]} map covering both travel directions."""
    stations = {}
    for direction in (mtapi.EAST, mtapi.WEST):
        for stop in mtapi.stops(GREENLINE, direction):
            name = stop['Text'].strip()
            code = stop['Value']
            codes = stations.setdefault(name, [])
            if code not in codes:
                codes.append(code)
    return stations
def get_soon_departures(station_code):
    """Return imminent departures ('Min' or 'Due') from the stop, both directions."""
    departures = mtapi.timepointdepartures(GREENLINE, mtapi.EAST, station_code)
    departures.extend(mtapi.timepointdepartures(GREENLINE, mtapi.WEST, station_code))
    return [
        {'time': dep['DepartureText'], 'direction': dep['RouteDirection']}
        for dep in departures
        if 'Min' in dep['DepartureText'] or 'Due' in dep['DepartureText']
    ]
| {
"content_hash": "d5b59ab8846eb474d009f22d65f6f884",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 31.710526315789473,
"alnum_prop": 0.6049792531120332,
"repo_name": "kjschiroo/lightrail",
"id": "7053b53afa6d1e78517b4b524563b7c29c623a55",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightrail/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5408"
}
],
"symlink_target": ""
} |
"""
Remote python server.
Execute Python commands remotely and send output back.
WARNING: This version has a gaping security hole -- it accepts requests
from any host on the Internet!
"""
import sys
from socket import socket, AF_INET, SOCK_STREAM
import io
import traceback
PORT = 4127
BUFSIZE = 1024
def main():
    """Listen on PORT (or sys.argv[1]) and execute each client's request."""
    port = int(sys.argv[1]) if len(sys.argv) > 1 else PORT
    listener = socket(AF_INET, SOCK_STREAM)
    listener.bind(('', port))
    listener.listen(1)
    while True:
        conn, (remotehost, remoteport) = listener.accept()
        with conn:
            print('connection from', remotehost, remoteport)
            # Read until the client closes its side of the connection.
            chunks = []
            while True:
                data = conn.recv(BUFSIZE)
                if not data:
                    break
                chunks.append(data)
            reply = execute(b''.join(chunks).decode())
            conn.send(reply.encode())
def execute(request):
    """Exec *request* and return everything it wrote to stdout/stderr,
    including a traceback if it raised."""
    capture = io.StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = capture
    try:
        try:
            exec(request, {}, {})
        except:  # deliberately broad: report any error from the client code
            print()
            traceback.print_exc(100)
    finally:
        sys.stdout = saved_out
        sys.stderr = saved_err
    return capture.getvalue()
# Start serving immediately when the script runs; Ctrl-C exits quietly.
try:
    main()
except KeyboardInterrupt:
    pass
| {
"content_hash": "7a1cc9348a3953b72d32d430f4895c87",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 71,
"avg_line_length": 23.196428571428573,
"alnum_prop": 0.567359507313318,
"repo_name": "massimo-nocentini/on-python",
"id": "a18de137fa1cf888e43c2824923de235f8e52af3",
"size": "1323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo-official/rpythond.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1714196"
},
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "273755"
}
],
"symlink_target": ""
} |
"""Wrapper for _decode_proto_sparse_op.so."""
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
decode_proto_sparse_module = load_library.load_op_library(
resource_loader.get_path_to_datafile('_decode_proto_sparse_op.so'))
decode_proto_sparse_v2 = decode_proto_sparse_module.decode_proto_sparse_v2
decode_proto_sparse_v3 = decode_proto_sparse_module.decode_proto_sparse_v3
decode_proto_sparse_v4 = decode_proto_sparse_module.decode_proto_sparse_v4
| {
"content_hash": "914cf2ab2619951b9b3878ce12cbf38e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 46.63636363636363,
"alnum_prop": 0.7933723196881092,
"repo_name": "google/struct2tensor",
"id": "35a6f159f76532fea22f0d6ace15c54fe3bf9bc6",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "struct2tensor/ops/gen_decode_proto_sparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "204752"
},
{
"name": "Dockerfile",
"bytes": "5492"
},
{
"name": "Python",
"bytes": "707761"
},
{
"name": "Shell",
"bytes": "12734"
},
{
"name": "Smarty",
"bytes": "1293"
},
{
"name": "Starlark",
"bytes": "48983"
}
],
"symlink_target": ""
} |
# NOTE(review): app metadata dict; presumably read by the karp plugin
# registry (the 'url'/'status' keys suggest routing and lifecycle) --
# confirm against the loader before relying on individual keys.
__info__ = {
    'name' : 'RSI Parsing Tool',
    'version' : '0.1',
    'description' : 'Reading tens of thousands of lines of RSI is quite time-consuming and it really burns your '
                    'eyes. This tool will pre-analyze the RSI file and give you hints on what info may contain '
                    'useful data.',
    'status' : 'developing',
    'author' : 'Tyr Chen',
    'url' : 'rsi',
}
| {
"content_hash": "ddbda945197c74d3f18cecad3a289338",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 115,
"avg_line_length": 45.8,
"alnum_prop": 0.5065502183406113,
"repo_name": "tyrchen/karp",
"id": "dc147531700be0f7d81429a972651db88ab19254",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/rsi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "80954"
},
{
"name": "JavaScript",
"bytes": "9891"
},
{
"name": "Python",
"bytes": "28681"
}
],
"symlink_target": ""
} |
class FrontPage:
    """
    Page object models allow selectors to be defined in one place and then
    used by all of the test.

    Selectors for the site front page: sign in/out, the data menu, and the
    COVID survey banner close button.
    """
    login_button_css = '#user-actions-footer > a'
    logout_button_text = 'Sign out'
    menu_button_data_css = '#main > ul > li:nth-child(1) > a'
    menu_button_data_alt_css = '#main > ul > li:nth-child(1) > button'
    drop_down_search_button_css = '#main > ul > li.dropdown.open > ul > li:nth-child(2) > a'
    covid_survey_banner_button_css = '#walkme-survey-balloon-69379 > div > div.walkme-custom-balloon-inner-div > div.walkme-custom-balloon-top-div > div > div.walkme-click-and-hover.walkme-custom-balloon-close-button.walkme-action-close.walkme-inspect-ignore'
class SignInModal:
    """
    Selectors for the sign-in modal, covering the GitHub login flow and the
    two-step (SMS code) verification flow.
    """
    login_modal_class = 'auth0-lock-header-logo'
    github_button_css = 'a[data-provider="github"]'
    button_tag = 'button'
    github_user_id_input_css = '#login_field'
    github_password_input_css = '#password'
    github_submit_button_css = '#login > div.auth-form-body.mt-3 > form > div > input.btn.btn-primary.btn-block'
    two_step_user_id_input_css = '#username'
    two_step_password_input_css = '#password'
    two_step_submit_css = '#login > input'
    two_step_send_sms_css = '#sms-send > input.submit-button'
    two_step_code_input_css = '#otp'
    two_step_submit_verification_css = '#otp-box > div > input.go-button'
class SearchPageList:
    """
    Selectors for the list-view search results page: facet panel parts,
    the metadata download button, and the first result link.
    """
    facet_box_class = 'facets'
    facet_class = 'facet'
    facet_group_expander = 'facet-group-title'
    facet_expander = 'facet-expander'
    category_title_class = 'facet-term__text'
    number_class = 'facet-term__count'
    category_title_class_radio = 'facet__radio-label'
    number_class_radio = 'facet__radio-count'
    download_metadata_button_xpath = '//*[contains(text(), "Download")]'
    search_result_item = '(//div[@class="result-item__data"])[1]//a'
class SearchPageMatrix:
    """
    Selectors for the facet panel on the matrix-view search page.
    """
    facet_box_class = 'facets.matrix-facets'
    facets_left_class = 'facet'
class SearchPageSummary:
    """
    Selectors for the facet panel on the summary-view search page.
    """
    facet_box_class = 'facets.summary-facets'
    facets_left_class = 'facet'
class ExperimentPage:
    """
    Selectors for an experiment page: the file gallery table and graph,
    the file details/graph tabs, the assembly selector, and the WalkMe
    corner widget.
    """
    done_panel_class = 'done'
    title_tag_name = 'h4'
    graph_close_button_css = 'div > div:nth-child(2) > div.file-gallery-graph-header.collapsing-title > button'
    sort_by_accession_xpath = '//*[@id="tables"]/div/div[2]/div[2]/div/table[2]/thead/tr[2]/th[1]/div'
    all_buttons_tag_name = 'button'
    download_graph_png_button_xpath = '//*[contains(text(), "Download Graph")]'
    file_type_column_xpath = '//div[@class="file-gallery-counts"]//..//table[@class="table table-sortable"]//tr//td[2]'
    accession_column_relative_xpath = '..//td[1]//span//div//span//a'
    information_button_relative_xpath = '..//td[1]//span//button//i'
    file_graph_tab_xpath = '//div[@class="tab-nav"]//li[2]'
    file_table_tab_xpath = '//a[text()="File details"]'
    assembly_selector_xpath = '//*[@id="tables"]/div/div[1]/div[1]/select[@class="form-control--select"]'
    file_graph_id = 'pipeline-graph'
    incl_deprecated_files_button_name = 'filterIncArchive'
    walkme_corner_widget = '#walkme-player'
class FilePage:
    """
    Selectors for a file page (download button).
    """
    download_button_xpath = '//*[contains(text(), "Download")]'
class AntibodyPage:
    """
    Selectors for an antibody page (links inside expanded document panels).
    """
    expanded_document_panels_xpath = '//div[@class="document__detail active"]//a[@href]'
class ReportPage:
    """
    Selectors for the report page (TSV download button).
    """
    download_tsv_report_button_xpath = '//*[contains(text(), "Download TSV")]'
class VisualizeModal:
    """
    Selectors for the visualize modal (UCSC browser link).
    """
    modal_class = 'modal-content'
    UCSC_link_partial_link_text = 'UCSC'
class DownloadModal:
    """
    Selectors for the download modal (confirm-download button).
    """
    download_button_xpath = '/html/body/div[2]/div/div/div[1]/div/div/div[3]/div/a[2]'
class InformationModal:
    """
    Selectors for the file-information modal (download icon).
    """
    download_icon_xpath = '//div[@class="modal-body"]//i[@class="icon icon-download"]'
class NavBar:
    """
    Selectors for the navigation bar (test-warning banner dismiss button).
    """
    testing_warning_banner_button_css = '#navbar > div.test-warning > div > button'
class LoadingSpinner:
    """
    Selector for the loading spinner shown while a page is busy.
    """
    loading_spinner_class = 'loading-spinner'
class DocumentPreview:
    """
    Selectors for document preview panels (expand switches and file links).
    """
    document_expand_buttons_class = 'document__file-detail-switch'
    document_files_xpath = '//div[@class="document__file"]//a[@href]'
class UCSCGenomeBrowser:
    """
    Selectors for the UCSC genome browser page (zoom-in control).
    """
    zoom_one_id = 'hgt.in1'
| {
"content_hash": "d177df0a9dcbc7db785863bec74768f6",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 259,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.6323090055484422,
"repo_name": "ENCODE-DCC/pyencoded-tools",
"id": "3515529203526e21db4a3358e09020f40d3053ec",
"size": "4760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qancode/pageobjects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "9064"
},
{
"name": "Batchfile",
"bytes": "1411"
},
{
"name": "Jupyter Notebook",
"bytes": "19265564"
},
{
"name": "Python",
"bytes": "939689"
},
{
"name": "Shell",
"bytes": "5829"
}
],
"symlink_target": ""
} |
from .gfycat import Gfycat
| {
"content_hash": "f5e47a2b6d4854ce944974a9d730c153",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.8148148148148148,
"repo_name": "Tarinu/taribot",
"id": "af917b9fc21dced8be2ffc452cd64d52ec0b0a6b",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taribot/apis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "874"
},
{
"name": "Python",
"bytes": "28566"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import sys
# Drive a single GPIO pin high; the pin number (BOARD numbering scheme)
# comes from the first command-line argument.
# Setup
GPIO.setmode(GPIO.BOARD)
GPIO.setup(int(sys.argv[1]), GPIO.OUT)
# Turn On
GPIO.output(int(sys.argv[1]), True)
# Turn off
#GPIO.output(sys.argv[1], False)
# Will Reset the GPIO
#GPIO.cleanup()
| {
"content_hash": "a77f44f86930b4db65b59f83bdc3c841",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 38,
"avg_line_length": 15.933333333333334,
"alnum_prop": 0.702928870292887,
"repo_name": "JamesMarino/Home-Automation",
"id": "67b2b5511c2d15b5ba77262ae372eb40631c2eb6",
"size": "239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/GPIO/TurnOn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "1"
},
{
"name": "Python",
"bytes": "1098"
},
{
"name": "Shell",
"bytes": "51"
}
],
"symlink_target": ""
} |
from datetime import datetime
from portality.events.consumer import EventConsumer
from portality import constants
from portality import models
from portality.bll import DOAJ, exceptions
from portality.lib.seamless import SeamlessException
class ApplicationPublisherRevisionNotify(EventConsumer):
    """
    Event consumer that notifies an application's owner when the application
    is moved into the 'revisions required' status.
    """
    ID = "application:publisher:revision:notify"

    @classmethod
    def consumes(cls, event):
        """True for status-change events entering 'revisions required'."""
        if event.id != constants.EVENT_APPLICATION_STATUS:
            return False
        if event.context.get("application") is None:
            return False
        old_status = event.context.get("old_status")
        new_status = event.context.get("new_status")
        return (old_status != constants.APPLICATION_STATUS_REVISIONS_REQUIRED and
                new_status == constants.APPLICATION_STATUS_REVISIONS_REQUIRED)

    @classmethod
    def consume(cls, event):
        """Build and dispatch the notification for the application's owner."""
        source = event.context.get("application")
        try:
            application = models.Application(**source)
        except SeamlessException as e:
            raise exceptions.NoSuchObjectException("Unable to construct Application from supplied source - data structure validation error, {x}".format(x=e))

        if application.owner is None:
            # Nobody to notify.
            return

        notes = DOAJ.notificationsService()
        notification = models.Notification()
        notification.who = application.owner
        notification.created_by = cls.ID
        notification.classification = constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE

        applied_on = datetime.strptime(application.date_applied, '%Y-%m-%dT%H:%M:%SZ')
        notification.long = notes.long_notification(cls.ID).format(
            application_title=application.bibjson().title,
            date_applied=applied_on.strftime("%d/%b/%Y")
        )
        notification.short = notes.short_notification(cls.ID)
        notes.notify(notification)
| {
"content_hash": "53853417981982ef4ccaccc1b0b06855",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 157,
"avg_line_length": 40.977777777777774,
"alnum_prop": 0.6941431670281996,
"repo_name": "DOAJ/doaj",
"id": "1b363c445fbd533dbdc9ea38c795ff5031f7ccea",
"size": "1844",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/events/consumers/application_publisher_revision_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
} |
import unittest
from pluginsmanager.observer.observable_list import ObservableList
from pluginsmanager.observer.update_type import UpdateType
from unittest.mock import MagicMock, call
class ObservableListTest(unittest.TestCase):
    """Unit tests for ObservableList's observer notification behavior."""

    @staticmethod
    def _observed(*initial):
        """Build an ObservableList pre-filled with *initial* items, then
        attach a MagicMock observer (so the fill itself is not recorded)."""
        observed = ObservableList()
        for item in initial:
            observed.append(item)
        observed.observer = MagicMock()
        return observed

    def test_append(self):
        payload = {'test': 5, 'a': [1, 2, 3, 5]}
        observed = self._observed()
        observed.append(payload)
        observed.observer.assert_called_once_with(UpdateType.CREATED, payload, 0)

    def test_insert(self):
        observed = self._observed(0, 2)
        observed.insert(1, 'a')
        observed.observer.assert_called_once_with(UpdateType.CREATED, 'a', 1)

    def test_remove(self):
        observed = self._observed(1, '2', 3)
        observed.remove('2')
        observed.observer.assert_called_once_with(UpdateType.DELETED, '2', 1)

    def test_pop_empty_parameter(self):
        observed = self._observed('a', 'b', 'c', 'd')
        # pop() with no argument removes the last element
        self.assertEqual('d', observed.pop())
        self.assertEqual(3, len(observed))
        observed.observer.assert_called_once_with(UpdateType.DELETED, 'd', len(observed))

    def test_pop_with_parameter(self):
        observed = self._observed('a', 'b', 'c', 'd')
        removed_index = 1
        self.assertEqual('b', observed.pop(removed_index))
        self.assertEqual(3, len(observed))
        observed.observer.assert_called_once_with(UpdateType.DELETED, 'b', removed_index)

    def test__setitem__(self):
        observed = self._observed(1, 2, 3)
        position = 1
        previous = observed[position]
        replacement = 4
        observed[position] = replacement
        observed.observer.assert_called_once_with(
            UpdateType.UPDATED, replacement, position, old=previous)

    def test__setitem_equal__(self):
        # Assigning the value already stored must not notify the observer.
        observed = self._observed(1, 2, 3)
        observed[1] = 2
        observed.observer.assert_not_called()

    def test__delitem__(self):
        observed = self._observed(123, 456, 789)
        removed_index = 1
        del observed[removed_index]
        observed.observer.assert_called_once_with(UpdateType.DELETED, 456, removed_index)

    def test_contains(self):
        observed = ObservableList()
        for member in (123, 456, 789):
            observed.append(member)
        for member in (123, 456, 789):
            self.assertTrue(member in observed)
        self.assertFalse(987 in observed)
        self.assertTrue(987 not in observed)

    def test_swap(self):
        first = {'key': 'value'}
        second = {'key2': 'value2'}
        observed = self._observed(first, second)
        observed[0], observed[1] = observed[1], observed[0]
        self.assertEqual(second, observed[0])
        self.assertEqual(first, observed[1])
        # A swap within one list is reported as two UPDATED notifications.
        expected_calls = [
            call(UpdateType.UPDATED, observed[0], 0, old=observed[1]),
            call(UpdateType.UPDATED, observed[1], 1, old=observed[0])
        ]
        self.assertListEqual(expected_calls, observed.observer.call_args_list)

    def test_swap_2(self):
        first = {'key': 'value'}
        second = {'key2': 'value2'}
        left = self._observed(first)
        right = self._observed(second)
        # Swap elements across two distinct observable lists.
        left[0], right[0] = right[0], left[0]
        self.assertEqual(second, left[0])
        self.assertEqual(first, right[0])
        left.observer.assert_called_once_with(UpdateType.UPDATED, left[0], 0, old=right[0])
        right.observer.assert_called_once_with(UpdateType.UPDATED, right[0], 0, old=left[0])

    def test_move(self):
        first = {'key': 'value'}
        second = {'key2': 'value2'}
        observed = self._observed(first, second, 4, 23)
        target_index = 2
        origin_index = observed.index(first)
        observed.move(first, target_index)
        self.assertEqual(observed.index(first), target_index)
        self.assertListEqual([second, 4, first, 23], list(observed))
        # A move is reported as a DELETED at the old index then a CREATED at the new one.
        expected_calls = [
            call(UpdateType.DELETED, first, origin_index),
            call(UpdateType.CREATED, first, target_index)
        ]
        self.assertListEqual(expected_calls, observed.observer.call_args_list)

    def test_move_same_index(self):
        first = {'key': 'value'}
        observed = self._observed(first, {'key2': 'value2'}, 4, 23)
        # Moving an element onto its own index must not notify the observer.
        observed.move(first, observed.index(first))
        observed.observer.assert_not_called()
| {
"content_hash": "5b3219b7036f1c6d1bdb3d305ede8876",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 99,
"avg_line_length": 24.308035714285715,
"alnum_prop": 0.5651056014692378,
"repo_name": "PedalPi/PluginsManager",
"id": "ce666f9ab24b4125a4188a10dac96523afe2f987",
"size": "6022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/observer/observable_list_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "352748"
}
],
"symlink_target": ""
} |
from webapp2 import RequestHandler, cached_property
from webapp2_extras.sessions import get_store
from models.user import User
class SessionHandler(RequestHandler):
    """webapp2 request handler base class that adds session support.

    Sessions are loaded before dispatching and persisted afterwards, even
    when the handler raises.
    """

    def dispatch(self):
        # Bind the session store for this request, then make sure any
        # session changes are saved no matter how dispatch exits.
        self.session_store = get_store(request=self.request)
        try:
            RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)

    @cached_property
    def session(self):
        """Lazily fetch the session object for the current request."""
        return self.session_store.get_session()

    def current_user(self, user=None):
        """Return the logged-in User, optionally recording *user* first.

        When *user* is given, its google_id is stored in the session.
        Returns None when no user is recorded in the session.
        """
        if user:
            self.session['current_user'] = user.google_id
        if 'current_user' not in self.session:
            return None
        return User.get_by_google_id(self.session['current_user'])
"content_hash": "bab0faed1c7da7bd5ea470b7731e0462",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.6681922196796338,
"repo_name": "opendatapress/open_data_press",
"id": "855149511719bda801c2464d8aa233ad5e5321ea",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/sessions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1923"
},
{
"name": "HTML",
"bytes": "33358"
},
{
"name": "JavaScript",
"bytes": "108789"
},
{
"name": "Python",
"bytes": "592004"
}
],
"symlink_target": ""
} |
import datetime
from typing import Dict, Optional
import msrest.serialization
class CloudEvent(msrest.serialization.Model):
    """Properties of an event published to an Event Grid topic using the CloudEvent 1.0 Schema.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param id: Required. An identifier for the event. The combination of id and source must be
     unique for each distinct event.
    :type id: str
    :param source: Required. Identifies the context in which an event happened. The combination of
     id and source must be unique for each distinct event.
    :type source: str
    :param data: Event data specific to the event type.
    :type data: object
    :param data_base64: Event data specific to the event type, encoded as a base64 string.
    :type data_base64: bytearray
    :param type: Required. Type of event related to the originating occurrence.
    :type type: str
    :param time: The time (in UTC) the event was generated, in RFC3339 format.
    :type time: ~datetime.datetime
    :param specversion: Required. The version of the CloudEvents specification which the event
     uses.
    :type specversion: str
    :param dataschema: Identifies the schema that data adheres to.
    :type dataschema: str
    :param datacontenttype: Content type of data value.
    :type datacontenttype: str
    :param subject: This describes the subject of the event in the context of the event producer
     (identified by source).
    :type subject: str
    """

    # msrest validation constraints: fields that must be populated before
    # the model can be serialized and sent.
    _validation = {
        'id': {'required': True},
        'source': {'required': True},
        'type': {'required': True},
        'specversion': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    # The empty key for additional_properties collects unmatched JSON members
    # (per the docstring above).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'id': {'key': 'id', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'data': {'key': 'data', 'type': 'object'},
        'data_base64': {'key': 'data_base64', 'type': 'bytearray'},
        'type': {'key': 'type', 'type': 'str'},
        'time': {'key': 'time', 'type': 'iso-8601'},
        'specversion': {'key': 'specversion', 'type': 'str'},
        'dataschema': {'key': 'dataschema', 'type': 'str'},
        'datacontenttype': {'key': 'datacontenttype', 'type': 'str'},
        'subject': {'key': 'subject', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: str,
        source: str,
        type: str,
        specversion: str,
        additional_properties: Optional[Dict[str, object]] = None,
        data: Optional[object] = None,
        data_base64: Optional[bytearray] = None,
        time: Optional[datetime.datetime] = None,
        dataschema: Optional[str] = None,
        datacontenttype: Optional[str] = None,
        subject: Optional[str] = None,
        **kwargs
    ):
        super(CloudEvent, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.id = id
        self.source = source
        self.data = data
        self.data_base64 = data_base64
        self.type = type
        self.time = time
        self.specversion = specversion
        self.dataschema = dataschema
        self.datacontenttype = datacontenttype
        self.subject = subject
class EventGridEvent(msrest.serialization.Model):
    """Properties of an event published to an Event Grid topic using the EventGrid Schema.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. An unique identifier for the event.
    :type id: str
    :param topic: The resource path of the event source.
    :type topic: str
    :param subject: Required. A resource path relative to the topic path.
    :type subject: str
    :param data: Required. Event data specific to the event type.
    :type data: object
    :param event_type: Required. The type of the event that occurred.
    :type event_type: str
    :param event_time: Required. The time (in UTC) the event was generated.
    :type event_time: ~datetime.datetime
    :ivar metadata_version: The schema version of the event metadata.
    :vartype metadata_version: str
    :param data_version: Required. The schema version of the data object.
    :type data_version: str
    """

    # msrest validation constraints: required fields, plus metadata_version
    # which is read-only (populated by the service, never sent).
    _validation = {
        'id': {'required': True},
        'subject': {'required': True},
        'data': {'required': True},
        'event_type': {'required': True},
        'event_time': {'required': True},
        'metadata_version': {'readonly': True},
        'data_version': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'topic': {'key': 'topic', 'type': 'str'},
        'subject': {'key': 'subject', 'type': 'str'},
        'data': {'key': 'data', 'type': 'object'},
        'event_type': {'key': 'eventType', 'type': 'str'},
        'event_time': {'key': 'eventTime', 'type': 'iso-8601'},
        'metadata_version': {'key': 'metadataVersion', 'type': 'str'},
        'data_version': {'key': 'dataVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: str,
        subject: str,
        data: object,
        event_type: str,
        event_time: datetime.datetime,
        data_version: str,
        topic: Optional[str] = None,
        **kwargs
    ):
        super(EventGridEvent, self).__init__(**kwargs)
        self.id = id
        self.topic = topic
        self.subject = subject
        self.data = data
        self.event_type = event_type
        self.event_time = event_time
        self.metadata_version = None  # read-only: server-populated, never set by callers
        self.data_version = data_version
| {
"content_hash": "ce19151dda5890ff7d95aa335d1aed87",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 98,
"avg_line_length": 37.18987341772152,
"alnum_prop": 0.6089176310415249,
"repo_name": "Azure/azure-sdk-for-python",
"id": "782a90f1ad00f2dad5c1fa4dde6f8e7d461c608f",
"size": "6344",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/eventgrid/azure-eventgrid/azure/eventgrid/_generated/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import string
from solvertools.wordlist import WORDS
from itertools import cycle
ASCII_a = 97  # ord('a'): base code point for mapping letters onto 0-25 alphabet indices
def shift_letter(char, offset):
    """Rotate a single character *offset* places through the alphabet.

    Case is preserved; any character that is not an ASCII letter is
    returned unchanged.
    """
    if char not in string.ascii_letters:
        return char
    base = ord('a')  # 97, the same value the module-level ASCII_a holds
    rotated = chr(base + (ord(char.lower()) - base + offset) % 26)
    return rotated.upper() if char.isupper() else rotated
def caesar_shift(text, offset):
    """
    Performs a Caesar shift by the given offset.

    If the offset is a letter, it will look it up in the alphabet to convert
    it to a shift. (For example, a shift of 'C' means that 'A' goes to 'C',
    which is the same as a shift of 2.)

    >>> print(caesar_shift('caesar', 13))
    pnrfne
    >>> print(caesar_shift('CAESAR', 'C'))
    ECGUCT
    """
    if isinstance(offset, str):
        # Convert a letter offset to its 0-based alphabet position.
        offset = ord(offset.lower()) - ASCII_a
    return ''.join(shift_letter(ch, offset) for ch in text)
def caesar_unshift(text, offset):
    """
    Performs a Caesar shift backwards by the given offset.

    If the offset is a letter, it will look it up in the alphabet to convert
    it to a shift. (For example, a shift of 'C' means that 'C' goes to 'A',
    which is the same as a backward shift of 2.)

    >>> print(caesar_unshift('DBFTBS TIJGU', 1))
    CAESAR SHIFT
    """
    shift = offset
    if isinstance(shift, str):
        shift = ord(shift.lower()) - ASCII_a
    # Undoing a shift is just shifting by its negation.
    return caesar_shift(text, -shift)
def best_caesar_shift(text, wordlist=WORDS, count=5):
    """
    Find the most cromulent Caesar shift of a ciphertext.

    Tries all 26 shifts, searches the wordlist for matches in each
    candidate, and returns the best `count` results (each tagged with the
    shift that produced it).
    """
    found = []
    for shift in range(26):
        candidate = caesar_shift(text, shift)
        found.extend(match + (shift,) for match in wordlist.search(candidate))
    return wordlist.show_best_results(found, count=count)
def vigenere_encode(text, key, one_based=False):
    """
    Apply the Vigenere cipher to `text`, with `key` as the key.

    In this cipher, A + A = A, but in many cases in the Mystery Hunt,
    A + A = B. To get the A + A = B behavior, set `one_based` to true.

    Non-letter characters are dropped, so the key only advances on letters.

    >>> vigenere_encode('ABRACADABRA', 'abc')
    'ACTADCDBDRB'
    >>> vigenere_encode('ABRACADABRA', 'abc', one_based=True)
    'BDUBEDECESC'
    """
    # Fix: removed a dead `shifted = []` assignment that was immediately
    # overwritten by the comprehension below.
    letters = [ch for ch in text if ch in string.ascii_letters]
    # Each key letter acts as a Caesar shift; the key repeats over the text.
    shifted = [caesar_shift(ch, shift)
               for (ch, shift) in zip(letters, cycle(key))]
    result = ''.join(shifted)
    if one_based:
        result = caesar_shift(result, 1)
    return result
def vigenere_decode(text, key, one_based=False):
    """
    Decode a Vigenere cipher on `text`, with `key` as the key.

    In this cipher, B - B = A, but in many cases in the Mystery Hunt,
    B - B = Z. To get the B - B = Z behavior, set `one_based` to true.

    Non-letter characters are dropped, so the key only advances on letters.

    >>> vigenere_decode(vigenere_encode('ABRACADABRA', 'abc'), 'abc')
    'ABRACADABRA'
    """
    # Fix: removed a dead `shifted = []` assignment that was immediately
    # overwritten by the comprehension below.
    letters = [ch for ch in text if ch in string.ascii_letters]
    # Undo each key letter's shift; the key repeats over the text.
    shifted = [caesar_unshift(ch, shift)
               for (ch, shift) in zip(letters, cycle(key))]
    result = ''.join(shifted)
    if one_based:
        result = caesar_shift(result, -1)
    return result
| {
"content_hash": "8340d31aaf58ebd6275abf953f45cbb5",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 76,
"avg_line_length": 30.50943396226415,
"alnum_prop": 0.6150278293135436,
"repo_name": "rspeer/solvertools",
"id": "f6a9e01b2db4ffada037d30263781c8c4ae2c685",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solvertools/ciphers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7485"
},
{
"name": "JavaScript",
"bytes": "108752"
},
{
"name": "Makefile",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "85102"
},
{
"name": "Shell",
"bytes": "257"
}
],
"symlink_target": ""
} |
"""Run a private event loop and then throw a fatal error in it to verify that
we shut down cleanly and don't lose the error.
"""
import thingflow.filters.output
from thingflow.base import Scheduler, OutputThing, EventLoopOutputThingMixin, FatalError
import unittest
import asyncio
# Module-level scheduler bound to the default asyncio event loop, shared by the test below.
s = Scheduler(asyncio.get_event_loop())
import time  # NOTE(review): mid-module import; conventionally belongs with the imports above
class TestOutputThing(OutputThing, EventLoopOutputThingMixin):
    """Output thing that emits four events on a private loop, then raises FatalError."""

    def __init__(self):
        super().__init__()

    def _observe_event_loop(self):
        print("starting event loop")
        for event_value in range(4):
            # Honor a stop request between events.
            if self.stop_requested:
                break
            self._dispatch_next(event_value)
            time.sleep(1)
        raise FatalError("testing the fatal error")
class TestFatalErrorInPrivateLoop(unittest.TestCase):
    """A FatalError raised on a private event loop must propagate out of run_forever()."""

    def test_case(self):
        thing = TestOutputThing()
        thing.output()
        subscription = s.schedule_on_private_event_loop(thing)
        thing.print_downstream()
        try:
            s.run_forever()
        except FatalError:
            print("we got the fatal error as expected")
        else:
            print("The event loop exited without throwing a fatal error!")
            self.assertFalse(1, "The event loop exited without throwing a fatal error!")
# Allow running this test module directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "16b2d1621d71cb638c8bbfaf9214bab5",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.63671875,
"repo_name": "mpi-sws-rse/thingflow-python",
"id": "43abc7afb8e3ae790a7af251b41f44aaccebfc72",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fatal_error_in_private_loop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "290455"
},
{
"name": "Shell",
"bytes": "6604"
}
],
"symlink_target": ""
} |
import os
from distutils.core import setup
# Package metadata for fSequenceTools.
# Fix: the license keyword was misspelled 'licence', which distutils does not
# recognize (the license metadata was silently dropped with only a warning).
setup(
    name='fSequenceTools',
    version='1.0.0',
    description='Sequence numeric encoding and report building tools',
    long_description=\
    '''
    ==============
    fSequenceTools
    ==============
    This package was created to investigate two separate aspects of sequences
    analysis:
    Encoding time
        By using threading the reading job is split between reading data,
        detecting formats, and encoding sequence data. The structure is fully
        extensible to cover more formats and new types of encoders.
    Reporting
        Reports are put together in groups so that secondary analysis of
        encoded data can be shared by a group of different ploting and statistics
        producers.
    ''',
    author='Martin Zackrisson',
    author_email='martin.zackrisson@gu.se',
    url='https://gitorious.org/fseq',
    packages=['fseq', 'fseq.reading', 'fseq.reporting'],
    license='MIT',
    scripts=[os.path.join("scripts", p) for p in ("fseq",)],
    requires=['numpy', 'scipy', 'matplotlib'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Microsoft',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ]
)
| {
"content_hash": "2b121927a0b6b76a2d31740a67dd655c",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 31.625,
"alnum_prop": 0.6521739130434783,
"repo_name": "local-minimum/fseq",
"id": "cefe30a55f6277f38ec712cd15bebc7cb95307cf",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108367"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import *
# Every browse route follows the same shape: URL ^browse-<slug>/$ handled by
# the matching Browse* view, reversible by the name 'browse_<slug>'.
_BROWSE_ROUTES = (
    ('entries', BrowseEintrag),
    ('baende', BrowseBand),
    ('archivs', BrowseArchiv),
    ('institutions', BrowseInstitution),
    ('persons', BrowsePerson),
    ('bearbeiter', BrowseBearbeiter),
)

urlpatterns = [
    url(r'^browse-%s/$' % slug, view.as_view(), name='browse_%s' % slug)
    for slug, view in _BROWSE_ROUTES
]
| {
"content_hash": "365a4b194efe439f3c07d3ee5226e88f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 92,
"avg_line_length": 51.18181818181818,
"alnum_prop": 0.6873889875666075,
"repo_name": "acdh-oeaw/vhioe",
"id": "350be3765e66445a402a77c8650ea20ec80c42d0",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browsing/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25850"
},
{
"name": "HTML",
"bytes": "105907"
},
{
"name": "JavaScript",
"bytes": "220270"
},
{
"name": "Python",
"bytes": "91715"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.ads.googleads.v11.common.types import criteria
from google.ads.googleads.v11.common.types import feed_common
from google.ads.googleads.v11.enums.types import (
call_conversion_reporting_state as gage_call_conversion_reporting_state,
)
from google.ads.googleads.v11.enums.types import (
call_to_action_type as gage_call_to_action_type,
)
from google.ads.googleads.v11.enums.types import lead_form_call_to_action_type
from google.ads.googleads.v11.enums.types import lead_form_desired_intent
from google.ads.googleads.v11.enums.types import lead_form_field_user_input_type
from google.ads.googleads.v11.enums.types import (
lead_form_post_submit_call_to_action_type,
)
from google.ads.googleads.v11.enums.types import mime_type as gage_mime_type
from google.ads.googleads.v11.enums.types import mobile_app_vendor
from google.ads.googleads.v11.enums.types import price_extension_price_qualifier
from google.ads.googleads.v11.enums.types import price_extension_price_unit
from google.ads.googleads.v11.enums.types import price_extension_type
from google.ads.googleads.v11.enums.types import (
promotion_extension_discount_modifier,
)
from google.ads.googleads.v11.enums.types import promotion_extension_occasion
# Registers this module's message classes with proto-plus.  ``manifest`` lists
# every message defined below; ``marshal`` ties them to the shared
# google.ads.googleads.v11 marshal.
__protobuf__ = proto.module(
    package="google.ads.googleads.v11.common",
    marshal="google.ads.googleads.v11",
    manifest={
        "YoutubeVideoAsset",
        "MediaBundleAsset",
        "ImageAsset",
        "ImageDimension",
        "TextAsset",
        "LeadFormAsset",
        "LeadFormField",
        "LeadFormCustomQuestionField",
        "LeadFormSingleChoiceAnswers",
        "LeadFormDeliveryMethod",
        "WebhookDelivery",
        "BookOnGoogleAsset",
        "PromotionAsset",
        "CalloutAsset",
        "StructuredSnippetAsset",
        "SitelinkAsset",
        "PageFeedAsset",
        "DynamicEducationAsset",
        "MobileAppAsset",
        "HotelCalloutAsset",
        "CallAsset",
        "PriceAsset",
        "PriceOffering",
        "CallToActionAsset",
        "DynamicRealEstateAsset",
        "DynamicCustomAsset",
        "DynamicHotelsAndRentalsAsset",
        "DynamicFlightsAsset",
        "DiscoveryCarouselCardAsset",
        "DynamicTravelAsset",
        "DynamicLocalAsset",
        "DynamicJobsAsset",
    },
)
class YoutubeVideoAsset(proto.Message):
    r"""A YouTube asset.

    Attributes:
        youtube_video_id (str):
            YouTube video id. This is the 11 character
            string value used in the YouTube video URL.

            This field is a member of `oneof`_ ``_youtube_video_id``.
        youtube_video_title (str):
            YouTube video title.
    """

    # Field numbers identify fields on the wire; keep in sync with the
    # upstream google.ads proto definition.
    youtube_video_id = proto.Field(proto.STRING, number=2, optional=True,)
    youtube_video_title = proto.Field(proto.STRING, number=3,)
class MediaBundleAsset(proto.Message):
    r"""A MediaBundle asset.

    Attributes:
        data (bytes):
            Media bundle (ZIP file) asset data. The
            format of the uploaded ZIP file depends on the
            ad field where it will be used. For more
            information on the format, see the documentation
            of the ad field where you plan on using the
            MediaBundleAsset. This field is mutate only.

            This field is a member of `oneof`_ ``_data``.
    """

    # Mutate-only per the docstring: sent in requests, not returned in reads.
    data = proto.Field(proto.BYTES, number=2, optional=True,)
class ImageAsset(proto.Message):
    r"""An Image asset.

    Attributes:
        data (bytes):
            The raw bytes data of an image. This field is
            mutate only.

            This field is a member of `oneof`_ ``_data``.
        file_size (int):
            File size of the image asset in bytes.

            This field is a member of `oneof`_ ``_file_size``.
        mime_type (google.ads.googleads.v11.enums.types.MimeTypeEnum.MimeType):
            MIME type of the image asset.
        full_size (google.ads.googleads.v11.common.types.ImageDimension):
            Metadata for this image at its original size.
    """

    # Field numbers identify fields on the wire; keep in sync with the
    # upstream google.ads proto definition.
    data = proto.Field(proto.BYTES, number=5, optional=True,)
    file_size = proto.Field(proto.INT64, number=6, optional=True,)
    mime_type = proto.Field(
        proto.ENUM, number=3, enum=gage_mime_type.MimeTypeEnum.MimeType,
    )
    full_size = proto.Field(proto.MESSAGE, number=4, message="ImageDimension",)
class ImageDimension(proto.Message):
    r"""Metadata for an image at a certain size, either original or
    resized.

    Attributes:
        height_pixels (int):
            Height of the image.

            This field is a member of `oneof`_ ``_height_pixels``.
        width_pixels (int):
            Width of the image.

            This field is a member of `oneof`_ ``_width_pixels``.
        url (str):
            A URL that returns the image with this height
            and width.

            This field is a member of `oneof`_ ``_url``.
    """

    # All three fields are optional (proto3 presence-tracked).
    height_pixels = proto.Field(proto.INT64, number=4, optional=True,)
    width_pixels = proto.Field(proto.INT64, number=5, optional=True,)
    url = proto.Field(proto.STRING, number=6, optional=True,)
class TextAsset(proto.Message):
    r"""A Text asset.

    Attributes:
        text (str):
            Text content of the text asset.

            This field is a member of `oneof`_ ``_text``.
    """

    # Single optional field; number must match the upstream proto definition.
    text = proto.Field(proto.STRING, number=2, optional=True,)
class LeadFormAsset(proto.Message):
    r"""A Lead Form asset.

    Attributes:
        business_name (str):
            Required. The name of the business being
            advertised.
        call_to_action_type (google.ads.googleads.v11.enums.types.LeadFormCallToActionTypeEnum.LeadFormCallToActionType):
            Required. Pre-defined display text that
            encourages user to expand the form.
        call_to_action_description (str):
            Required. Text giving a clear value
            proposition of what users expect once they
            expand the form.
        headline (str):
            Required. Headline of the expanded form to
            describe what the form is asking for or
            facilitating.
        description (str):
            Required. Detailed description of the
            expanded form to describe what the form is
            asking for or facilitating.
        privacy_policy_url (str):
            Required. Link to a page describing the
            policy on how the collected data is handled by
            the advertiser/business.
        post_submit_headline (str):
            Headline of text shown after form submission
            that describes how the advertiser will follow up
            with the user.

            This field is a member of `oneof`_ ``_post_submit_headline``.
        post_submit_description (str):
            Detailed description shown after form
            submission that describes how the advertiser
            will follow up with the user.

            This field is a member of `oneof`_ ``_post_submit_description``.
        fields (Sequence[google.ads.googleads.v11.common.types.LeadFormField]):
            Ordered list of input fields.
        custom_question_fields (Sequence[google.ads.googleads.v11.common.types.LeadFormCustomQuestionField]):
            Ordered list of custom question fields.
        delivery_methods (Sequence[google.ads.googleads.v11.common.types.LeadFormDeliveryMethod]):
            Configured methods for collected lead data to
            be delivered to advertiser. Only one method
            typed as WebhookDelivery can be configured.
        post_submit_call_to_action_type (google.ads.googleads.v11.enums.types.LeadFormPostSubmitCallToActionTypeEnum.LeadFormPostSubmitCallToActionType):
            Pre-defined display text that encourages user
            action after the form is submitted.
        background_image_asset (str):
            Asset resource name of the background image.
            The minimum size is 600x314 and the aspect ratio
            must be 1.91:1 (+-1%).

            This field is a member of `oneof`_ ``_background_image_asset``.
        desired_intent (google.ads.googleads.v11.enums.types.LeadFormDesiredIntentEnum.LeadFormDesiredIntent):
            Chosen intent for the lead form, for example,
            more volume or more qualified.
        custom_disclosure (str):
            Custom disclosure shown along with Google
            disclaimer on the lead form. Accessible to
            allowed customers only.

            This field is a member of `oneof`_ ``_custom_disclosure``.
    """

    # Field numbers identify fields on the wire and are intentionally
    # non-contiguous; keep in sync with the upstream google.ads proto
    # definition and never renumber.
    business_name = proto.Field(proto.STRING, number=10,)
    call_to_action_type = proto.Field(
        proto.ENUM,
        number=17,
        enum=lead_form_call_to_action_type.LeadFormCallToActionTypeEnum.LeadFormCallToActionType,
    )
    call_to_action_description = proto.Field(proto.STRING, number=18,)
    headline = proto.Field(proto.STRING, number=12,)
    description = proto.Field(proto.STRING, number=13,)
    privacy_policy_url = proto.Field(proto.STRING, number=14,)
    post_submit_headline = proto.Field(proto.STRING, number=15, optional=True,)
    post_submit_description = proto.Field(
        proto.STRING, number=16, optional=True,
    )
    fields = proto.RepeatedField(
        proto.MESSAGE, number=8, message="LeadFormField",
    )
    custom_question_fields = proto.RepeatedField(
        proto.MESSAGE, number=23, message="LeadFormCustomQuestionField",
    )
    delivery_methods = proto.RepeatedField(
        proto.MESSAGE, number=9, message="LeadFormDeliveryMethod",
    )
    post_submit_call_to_action_type = proto.Field(
        proto.ENUM,
        number=19,
        enum=lead_form_post_submit_call_to_action_type.LeadFormPostSubmitCallToActionTypeEnum.LeadFormPostSubmitCallToActionType,
    )
    background_image_asset = proto.Field(
        proto.STRING, number=20, optional=True,
    )
    desired_intent = proto.Field(
        proto.ENUM,
        number=21,
        enum=lead_form_desired_intent.LeadFormDesiredIntentEnum.LeadFormDesiredIntent,
    )
    custom_disclosure = proto.Field(proto.STRING, number=22, optional=True,)
class LeadFormField(proto.Message):
    r"""One input field instance within a form.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        input_type (google.ads.googleads.v11.enums.types.LeadFormFieldUserInputTypeEnum.LeadFormFieldUserInputType):
            Describes the input type, which may be a
            predefined type such as "full name" or a
            pre-vetted question like "Do you own a car?".
        single_choice_answers (google.ads.googleads.v11.common.types.LeadFormSingleChoiceAnswers):
            Answer configuration for a single choice
            question. Can be set only for pre-vetted
            question fields. Minimum of 2 answers required
            and maximum of 12 allowed.

            This field is a member of `oneof`_ ``answers``.
    """

    input_type = proto.Field(
        proto.ENUM,
        number=1,
        enum=lead_form_field_user_input_type.LeadFormFieldUserInputTypeEnum.LeadFormFieldUserInputType,
    )
    # Part of the mutually-exclusive ``answers`` oneof.
    single_choice_answers = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="answers",
        message="LeadFormSingleChoiceAnswers",
    )
class LeadFormCustomQuestionField(proto.Message):
    r"""One custom question input field instance within a form.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        custom_question_text (str):
            The exact custom question field text (for
            example, "Do you own a car?").
        single_choice_answers (google.ads.googleads.v11.common.types.LeadFormSingleChoiceAnswers):
            Answer configuration for a single choice
            question. Minimum of 2 answers and maximum of 12
            allowed.

            This field is a member of `oneof`_ ``answers``.
    """

    custom_question_text = proto.Field(proto.STRING, number=1,)
    # Part of the mutually-exclusive ``answers`` oneof.
    single_choice_answers = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="answers",
        message="LeadFormSingleChoiceAnswers",
    )
class LeadFormSingleChoiceAnswers(proto.Message):
    r"""Defines possible answers for a single choice question,
    usually presented as a single-choice drop-down list.

    Attributes:
        answers (Sequence[str]):
            List of choices for a single question field.
            The order of entries defines UI order. Minimum
            of 2 answers required and maximum of 12 allowed.
    """

    answers = proto.RepeatedField(proto.STRING, number=1,)
class LeadFormDeliveryMethod(proto.Message):
    r"""A configuration of how leads are delivered to the advertiser.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        webhook (google.ads.googleads.v11.common.types.WebhookDelivery):
            Webhook method of delivery.

            This field is a member of `oneof`_ ``delivery_details``.
    """

    # Part of the mutually-exclusive ``delivery_details`` oneof.
    webhook = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="delivery_details",
        message="WebhookDelivery",
    )
class WebhookDelivery(proto.Message):
    r"""Google notifies the advertiser of leads by making HTTP calls
    to an endpoint they specify. The requests contain JSON matching
    a schema that Google publishes as part of form ads
    documentation.

    Attributes:
        advertiser_webhook_url (str):
            Webhook url specified by advertiser to send
            the lead.

            This field is a member of `oneof`_ ``_advertiser_webhook_url``.
        google_secret (str):
            Anti-spoofing secret set by the advertiser as
            part of the webhook payload.

            This field is a member of `oneof`_ ``_google_secret``.
        payload_schema_version (int):
            The schema version that this delivery
            instance will use.

            This field is a member of `oneof`_ ``_payload_schema_version``.
    """

    # All fields optional; numbers must match the upstream proto definition.
    advertiser_webhook_url = proto.Field(proto.STRING, number=4, optional=True,)
    google_secret = proto.Field(proto.STRING, number=5, optional=True,)
    payload_schema_version = proto.Field(proto.INT64, number=6, optional=True,)
class BookOnGoogleAsset(proto.Message):
    r"""A Book on Google asset. Used to redirect user to book through
    Google. Book on Google will change the redirect url to book
    directly through Google.
    """

    # Marker message: declares no fields of its own.
class PromotionAsset(proto.Message):
    r"""A Promotion asset.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        promotion_target (str):
            Required. A freeform description of what the
            promotion is targeting.
        discount_modifier (google.ads.googleads.v11.enums.types.PromotionExtensionDiscountModifierEnum.PromotionExtensionDiscountModifier):
            A modifier for qualification of the discount.
        redemption_start_date (str):
            Start date of when the promotion is eligible
            to be redeemed, in yyyy-MM-dd format.
        redemption_end_date (str):
            Last date of when the promotion is eligible
            to be redeemed, in yyyy-MM-dd format.
        occasion (google.ads.googleads.v11.enums.types.PromotionExtensionOccasionEnum.PromotionExtensionOccasion):
            The occasion the promotion was intended for.
            If an occasion is set, the redemption window
            will need to fall within the date range
            associated with the occasion.
        language_code (str):
            The language of the promotion.
            Represented as BCP 47 language tag.
        start_date (str):
            Start date of when this asset is effective
            and can begin serving, in yyyy-MM-dd format.
        end_date (str):
            Last date of when this asset is effective and
            still serving, in yyyy-MM-dd format.
        ad_schedule_targets (Sequence[google.ads.googleads.v11.common.types.AdScheduleInfo]):
            List of non-overlapping schedules specifying
            all time intervals for which the asset may
            serve. There can be a maximum of 6 schedules per
            day, 42 in total.
        percent_off (int):
            Percentage off discount in the promotion. 1,000,000 = 100%.
            Either this or money_amount_off is required.

            This field is a member of `oneof`_ ``discount_type``.
        money_amount_off (google.ads.googleads.v11.common.types.Money):
            Money amount off for discount in the promotion. Either this
            or percent_off is required.

            This field is a member of `oneof`_ ``discount_type``.
        promotion_code (str):
            A code the user should use in order to be
            eligible for the promotion.

            This field is a member of `oneof`_ ``promotion_trigger``.
        orders_over_amount (google.ads.googleads.v11.common.types.Money):
            The amount the total order needs to be for
            the user to be eligible for the promotion.

            This field is a member of `oneof`_ ``promotion_trigger``.
    """

    promotion_target = proto.Field(proto.STRING, number=1,)
    discount_modifier = proto.Field(
        proto.ENUM,
        number=2,
        enum=promotion_extension_discount_modifier.PromotionExtensionDiscountModifierEnum.PromotionExtensionDiscountModifier,
    )
    redemption_start_date = proto.Field(proto.STRING, number=7,)
    redemption_end_date = proto.Field(proto.STRING, number=8,)
    occasion = proto.Field(
        proto.ENUM,
        number=9,
        enum=promotion_extension_occasion.PromotionExtensionOccasionEnum.PromotionExtensionOccasion,
    )
    language_code = proto.Field(proto.STRING, number=10,)
    start_date = proto.Field(proto.STRING, number=11,)
    end_date = proto.Field(proto.STRING, number=12,)
    ad_schedule_targets = proto.RepeatedField(
        proto.MESSAGE, number=13, message=criteria.AdScheduleInfo,
    )
    # ``discount_type`` oneof: exactly one of percent_off / money_amount_off.
    percent_off = proto.Field(proto.INT64, number=3, oneof="discount_type",)
    money_amount_off = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="discount_type",
        message=feed_common.Money,
    )
    # ``promotion_trigger`` oneof: at most one of promotion_code /
    # orders_over_amount.
    promotion_code = proto.Field(
        proto.STRING, number=5, oneof="promotion_trigger",
    )
    orders_over_amount = proto.Field(
        proto.MESSAGE,
        number=6,
        oneof="promotion_trigger",
        message=feed_common.Money,
    )
class CalloutAsset(proto.Message):
    r"""A Callout asset.

    Attributes:
        callout_text (str):
            Required. The callout text.
            The length of this string should be between 1
            and 25, inclusive.
        start_date (str):
            Start date of when this asset is effective
            and can begin serving, in yyyy-MM-dd format.
        end_date (str):
            Last date of when this asset is effective and
            still serving, in yyyy-MM-dd format.
        ad_schedule_targets (Sequence[google.ads.googleads.v11.common.types.AdScheduleInfo]):
            List of non-overlapping schedules specifying
            all time intervals for which the asset may
            serve. There can be a maximum of 6 schedules per
            day, 42 in total.
    """

    callout_text = proto.Field(proto.STRING, number=1,)
    start_date = proto.Field(proto.STRING, number=2,)
    end_date = proto.Field(proto.STRING, number=3,)
    ad_schedule_targets = proto.RepeatedField(
        proto.MESSAGE, number=4, message=criteria.AdScheduleInfo,
    )
class StructuredSnippetAsset(proto.Message):
    r"""A Structured Snippet asset.

    Attributes:
        header (str):
            Required. The header of the snippet.
            This string should be one of the predefined
            values at
            https://developers.google.com/google-ads/api/reference/data/structured-snippet-headers
        values (Sequence[str]):
            Required. The values in the snippet.
            The size of this collection should be between 3
            and 10, inclusive. The length of each value
            should be between 1 and 25 characters,
            inclusive.
    """

    header = proto.Field(proto.STRING, number=1,)
    values = proto.RepeatedField(proto.STRING, number=2,)
class SitelinkAsset(proto.Message):
    r"""A Sitelink asset.

    Attributes:
        link_text (str):
            Required. URL display text for the sitelink.
            The length of this string should be between 1
            and 25, inclusive.
        description1 (str):
            First line of the description for the
            sitelink. If set, the length should be between 1
            and 35, inclusive, and description2 must also be
            set.
        description2 (str):
            Second line of the description for the
            sitelink. If set, the length should be between 1
            and 35, inclusive, and description1 must also be
            set.
        start_date (str):
            Start date of when this asset is effective
            and can begin serving, in yyyy-MM-dd format.
        end_date (str):
            Last date of when this asset is effective and
            still serving, in yyyy-MM-dd format.
        ad_schedule_targets (Sequence[google.ads.googleads.v11.common.types.AdScheduleInfo]):
            List of non-overlapping schedules specifying
            all time intervals for which the asset may
            serve. There can be a maximum of 6 schedules per
            day, 42 in total.
    """

    link_text = proto.Field(proto.STRING, number=1,)
    description1 = proto.Field(proto.STRING, number=2,)
    description2 = proto.Field(proto.STRING, number=3,)
    start_date = proto.Field(proto.STRING, number=4,)
    end_date = proto.Field(proto.STRING, number=5,)
    ad_schedule_targets = proto.RepeatedField(
        proto.MESSAGE, number=6, message=criteria.AdScheduleInfo,
    )
class PageFeedAsset(proto.Message):
    r"""A Page Feed asset.

    Attributes:
        page_url (str):
            Required. The webpage that advertisers want
            to target.
        labels (Sequence[str]):
            Labels used to group the page urls.
    """

    page_url = proto.Field(proto.STRING, number=1,)
    labels = proto.RepeatedField(proto.STRING, number=2,)
class DynamicEducationAsset(proto.Message):
    r"""A Dynamic Education asset.

    Attributes:
        program_id (str):
            Required. Program ID which can be any
            sequence of letters and digits, and must be
            unique and match the values of remarketing tag.
            Required.
        location_id (str):
            Location ID which can be any sequence of
            letters and digits and must be unique.
        program_name (str):
            Required. Program name, for example, Nursing.
            Required.
        subject (str):
            Subject of study, for example, Health.
        program_description (str):
            Program description, for example, Nursing
            Certification.
        school_name (str):
            School name, for example, Mountain View
            School of Nursing.
        address (str):
            School address which can be specified in one
            of the following formats. (1) City, state, code,
            country, for example, Mountain View, CA, USA.
            (2) Full address, for example, 123 Boulevard St,
            Mountain View, CA 94043. (3) Latitude-longitude
            in the DDD format, for example, 41.40338,
            2.17403
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, Nursing
            certification, Health, Mountain View.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        similar_program_ids (Sequence[str]):
            Similar program IDs.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
        thumbnail_image_url (str):
            Thumbnail image url, for example,
            http://www.example.com/thumbnail.png. The
            thumbnail image will not be uploaded as image
            asset.
        image_url (str):
            Image url, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
    """

    program_id = proto.Field(proto.STRING, number=1,)
    location_id = proto.Field(proto.STRING, number=2,)
    program_name = proto.Field(proto.STRING, number=3,)
    subject = proto.Field(proto.STRING, number=4,)
    program_description = proto.Field(proto.STRING, number=5,)
    school_name = proto.Field(proto.STRING, number=6,)
    address = proto.Field(proto.STRING, number=7,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=8,)
    android_app_link = proto.Field(proto.STRING, number=9,)
    similar_program_ids = proto.RepeatedField(proto.STRING, number=10,)
    ios_app_link = proto.Field(proto.STRING, number=11,)
    ios_app_store_id = proto.Field(proto.INT64, number=12,)
    thumbnail_image_url = proto.Field(proto.STRING, number=13,)
    image_url = proto.Field(proto.STRING, number=14,)
class MobileAppAsset(proto.Message):
    r"""An asset representing a mobile app.

    Attributes:
        app_id (str):
            Required. A string that uniquely identifies a
            mobile application. It should just contain the
            platform native id, like "com.android.ebay" for
            Android or "12345689" for iOS.
        app_store (google.ads.googleads.v11.enums.types.MobileAppVendorEnum.MobileAppVendor):
            Required. The application store that
            distributes this specific app.
        link_text (str):
            Required. The visible text displayed when the
            link is rendered in an ad. The length of this
            string should be between 1 and 25, inclusive.
        start_date (str):
            Start date of when this asset is effective
            and can begin serving, in yyyy-MM-dd format.
        end_date (str):
            Last date of when this asset is effective and
            still serving, in yyyy-MM-dd format.
    """

    app_id = proto.Field(proto.STRING, number=1,)
    app_store = proto.Field(
        proto.ENUM,
        number=2,
        enum=mobile_app_vendor.MobileAppVendorEnum.MobileAppVendor,
    )
    link_text = proto.Field(proto.STRING, number=3,)
    start_date = proto.Field(proto.STRING, number=4,)
    end_date = proto.Field(proto.STRING, number=5,)
class HotelCalloutAsset(proto.Message):
    r"""An asset representing a hotel callout.

    Attributes:
        text (str):
            Required. The text of the hotel callout
            asset. The length of this string should be
            between 1 and 25, inclusive.
        language_code (str):
            Required. The language of the hotel callout.
            Represented as BCP 47 language tag.
    """

    text = proto.Field(proto.STRING, number=1,)
    language_code = proto.Field(proto.STRING, number=2,)
class CallAsset(proto.Message):
    r"""A Call asset.

    Attributes:
        country_code (str):
            Required. Two-letter country code of the
            phone number. Examples: 'US', 'us'.
        phone_number (str):
            Required. The advertiser's raw phone number.
            Examples: '1234567890', '(123)456-7890'
        call_conversion_reporting_state (google.ads.googleads.v11.enums.types.CallConversionReportingStateEnum.CallConversionReportingState):
            Indicates whether this CallAsset should use
            its own call conversion setting, follow the
            account level setting, or disable call
            conversion.
        call_conversion_action (str):
            The conversion action to attribute a call conversion to. If
            not set, the default conversion action is used. This field
            only has effect if call_conversion_reporting_state is set to
            USE_RESOURCE_LEVEL_CALL_CONVERSION_ACTION.
        ad_schedule_targets (Sequence[google.ads.googleads.v11.common.types.AdScheduleInfo]):
            List of non-overlapping schedules specifying
            all time intervals for which the asset may
            serve. There can be a maximum of 6 schedules per
            day, 42 in total.
    """

    country_code = proto.Field(proto.STRING, number=1,)
    phone_number = proto.Field(proto.STRING, number=2,)
    call_conversion_reporting_state = proto.Field(
        proto.ENUM,
        number=3,
        enum=gage_call_conversion_reporting_state.CallConversionReportingStateEnum.CallConversionReportingState,
    )
    call_conversion_action = proto.Field(proto.STRING, number=4,)
    ad_schedule_targets = proto.RepeatedField(
        proto.MESSAGE, number=5, message=criteria.AdScheduleInfo,
    )
class PriceAsset(proto.Message):
    r"""An asset representing a list of price offers.

    Attributes:
        type_ (google.ads.googleads.v11.enums.types.PriceExtensionTypeEnum.PriceExtensionType):
            Required. The type of the price asset.
        price_qualifier (google.ads.googleads.v11.enums.types.PriceExtensionPriceQualifierEnum.PriceExtensionPriceQualifier):
            The price qualifier of the price asset.
        language_code (str):
            Required. The language of the price asset.
            Represented as BCP 47 language tag.
        price_offerings (Sequence[google.ads.googleads.v11.common.types.PriceOffering]):
            The price offerings of the price asset.
            The size of this collection should be between 3
            and 8, inclusive.
    """

    # Named ``type_`` (trailing underscore) to avoid shadowing the ``type``
    # builtin; the wire field name is still ``type``.
    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=price_extension_type.PriceExtensionTypeEnum.PriceExtensionType,
    )
    price_qualifier = proto.Field(
        proto.ENUM,
        number=2,
        enum=price_extension_price_qualifier.PriceExtensionPriceQualifierEnum.PriceExtensionPriceQualifier,
    )
    language_code = proto.Field(proto.STRING, number=3,)
    # Forward reference by name: PriceOffering is declared later in this
    # module.
    price_offerings = proto.RepeatedField(
        proto.MESSAGE, number=4, message="PriceOffering",
    )
class PriceOffering(proto.Message):
    r"""A single price offering within a PriceAsset.

    Attributes:
        header (str):
            Required. The header of the price offering.
            The length of this string should be between 1
            and 25, inclusive.
        description (str):
            Required. The description of the price
            offering. The length of this string should be
            between 1 and 25, inclusive.
        price (google.ads.googleads.v11.common.types.Money):
            Required. The price value of the price
            offering.
        unit (google.ads.googleads.v11.enums.types.PriceExtensionPriceUnitEnum.PriceExtensionPriceUnit):
            The price unit of the price offering.
        final_url (str):
            Required. The final URL after all cross
            domain redirects.
        final_mobile_url (str):
            The final mobile URL after all cross domain
            redirects.
    """

    header = proto.Field(proto.STRING, number=1,)
    description = proto.Field(proto.STRING, number=2,)
    price = proto.Field(proto.MESSAGE, number=3, message=feed_common.Money,)
    unit = proto.Field(
        proto.ENUM,
        number=4,
        enum=price_extension_price_unit.PriceExtensionPriceUnitEnum.PriceExtensionPriceUnit,
    )
    final_url = proto.Field(proto.STRING, number=5,)
    final_mobile_url = proto.Field(proto.STRING, number=6,)
class CallToActionAsset(proto.Message):
    r"""A call to action asset.

    Attributes:
        call_to_action (google.ads.googleads.v11.enums.types.CallToActionTypeEnum.CallToActionType):
            Call to action.
    """

    call_to_action = proto.Field(
        proto.ENUM,
        number=1,
        enum=gage_call_to_action_type.CallToActionTypeEnum.CallToActionType,
    )
class DynamicRealEstateAsset(proto.Message):
    r"""A dynamic real estate asset.

    Attributes:
        listing_id (str):
            Required. Listing ID which can be any
            sequence of letters and digits, and must be
            unique and match the values of remarketing tag.
            Required.
        listing_name (str):
            Required. Listing name, for example,
            Boulevard Bungalow. Required.
        city_name (str):
            City name, for example, Mountain View,
            California.
        description (str):
            Description, for example, 3 beds, 2 baths,
            1568 sq. ft.
        address (str):
            Address which can be specified in one of the
            following formats. (1) City, state, code,
            country, for example, Mountain View, CA, USA.
            (2) Full address, for example, 123 Boulevard St,
            Mountain View, CA 94043. (3) Latitude-longitude
            in the DDD format, for example, 41.40338,
            2.17403
        price (str):
            Price which can be number followed by the
            alphabetic currency code, ISO 4217 standard. Use
            '.' as the decimal mark, for example, 200,000.00
            USD.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        property_type (str):
            Property type, for example, House.
        listing_type (str):
            Listing type, for example, For sale.
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, For sale;
            Houses for sale.
        formatted_price (str):
            Formatted price which can be any characters.
            If set, this attribute will be used instead of
            'price', for example, Starting at $200,000.00.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
        similar_listing_ids (Sequence[str]):
            Similar listing IDs.
    """

    listing_id = proto.Field(proto.STRING, number=1,)
    listing_name = proto.Field(proto.STRING, number=2,)
    city_name = proto.Field(proto.STRING, number=3,)
    description = proto.Field(proto.STRING, number=4,)
    address = proto.Field(proto.STRING, number=5,)
    price = proto.Field(proto.STRING, number=6,)
    image_url = proto.Field(proto.STRING, number=7,)
    property_type = proto.Field(proto.STRING, number=8,)
    listing_type = proto.Field(proto.STRING, number=9,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=10,)
    formatted_price = proto.Field(proto.STRING, number=11,)
    android_app_link = proto.Field(proto.STRING, number=12,)
    ios_app_link = proto.Field(proto.STRING, number=13,)
    ios_app_store_id = proto.Field(proto.INT64, number=14,)
    similar_listing_ids = proto.RepeatedField(proto.STRING, number=15,)
class DynamicCustomAsset(proto.Message):
    r"""A dynamic custom asset.

    Attributes:
        id (str):
            Required. ID which can be any sequence of
            letters and digits, and must be unique and match
            the values of remarketing tag, for example,
            sedan. Required.
        id2 (str):
            ID2 which can be any sequence of letters and
            digits, for example, red. ID sequence (ID + ID2)
            must be unique.
        item_title (str):
            Required. Item title, for example, Mid-size
            sedan. Required.
        item_subtitle (str):
            Item subtitle, for example, At your Mountain
            View dealership.
        item_description (str):
            Item description, for example, Best selling
            mid-size car.
        item_address (str):
            Item address which can be specified in one of
            the following formats. (1) City, state, code,
            country, for example, Mountain View, CA, USA.
            (2) Full address, for example, 123 Boulevard St,
            Mountain View, CA 94043. (3) Latitude-longitude
            in the DDD format, for example, 41.40338,
            2.17403
        item_category (str):
            Item category, for example, Sedans.
        price (str):
            Price which can be number followed by the
            alphabetic currency code, ISO 4217 standard. Use
            '.' as the decimal mark, for example, 20,000.00
            USD.
        sale_price (str):
            Sale price which can be number followed by
            the alphabetic currency code, ISO 4217 standard.
            Use '.' as the decimal mark, for example,
            15,000.00 USD. Must be less than the 'price'
            field.
        formatted_price (str):
            Formatted price which can be any characters.
            If set, this attribute will be used instead of
            'price', for example, Starting at $20,000.00.
        formatted_sale_price (str):
            Formatted sale price which can be any
            characters. If set, this attribute will be used
            instead of 'sale price', for example, On sale
            for $15,000.00.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, Sedans, 4
            door sedans.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
        similar_ids (Sequence[str]):
            Similar IDs.
    """

    id = proto.Field(proto.STRING, number=1,)
    id2 = proto.Field(proto.STRING, number=2,)
    item_title = proto.Field(proto.STRING, number=3,)
    item_subtitle = proto.Field(proto.STRING, number=4,)
    item_description = proto.Field(proto.STRING, number=5,)
    item_address = proto.Field(proto.STRING, number=6,)
    item_category = proto.Field(proto.STRING, number=7,)
    price = proto.Field(proto.STRING, number=8,)
    sale_price = proto.Field(proto.STRING, number=9,)
    formatted_price = proto.Field(proto.STRING, number=10,)
    formatted_sale_price = proto.Field(proto.STRING, number=11,)
    image_url = proto.Field(proto.STRING, number=12,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=13,)
    android_app_link = proto.Field(proto.STRING, number=14,)
    # NOTE: field numbers 15-17 are out of declaration order here
    # (ios_app_link=16, ios_app_store_id=17, similar_ids=15); the numbers,
    # not the order, define the wire format, so do not "fix" them.
    ios_app_link = proto.Field(proto.STRING, number=16,)
    ios_app_store_id = proto.Field(proto.INT64, number=17,)
    similar_ids = proto.RepeatedField(proto.STRING, number=15,)
class DynamicHotelsAndRentalsAsset(proto.Message):
    r"""A dynamic hotels and rentals asset.

    Attributes:
        property_id (str):
            Required. Property ID which can be any
            sequence of letters and digits, and must be
            unique and match the values of remarketing tag.
            Required.
        property_name (str):
            Required. Property name, for example,
            Mountain View Hotel. Required.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        destination_name (str):
            Destination name, for example, Downtown
            Mountain View.
        description (str):
            Description, for example, Close to SJC
            Airport.
        price (str):
            Price which can be number followed by the
            alphabetic currency code, ISO 4217 standard. Use
            '.' as the decimal mark, for example, 100.00
            USD.
        sale_price (str):
            ISO 4217 standard. Use '.' as the decimal
            mark, for example, 80.00 USD. Must be less than
            the 'price' field.
        star_rating (int):
            Star rating. Must be a number between 1 to 5,
            inclusive.
        category (str):
            Category, for example, Hotel suite.
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, Mountain
            View "Hotels", South Bay hotels.
        address (str):
            Address which can be specified in one of the
            following formats. (1) City, state, code,
            country, for example, Mountain View, CA, USA.
            (2) Full address, for example, 123 Boulevard St,
            Mountain View, CA 94043. (3) Latitude-longitude
            in the DDD format, for example, 41.40338,
            2.17403
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
        formatted_price (str):
            Formatted price which can be any characters.
            If set, this attribute will be used instead of
            'price', for example, Starting at $100.00.
        formatted_sale_price (str):
            Formatted sale price which can be any
            characters. If set, this attribute will be used
            instead of 'sale price', for example, On sale
            for $80.00.
        similar_property_ids (Sequence[str]):
            Similar property IDs.
    """

    property_id = proto.Field(proto.STRING, number=1,)
    property_name = proto.Field(proto.STRING, number=2,)
    image_url = proto.Field(proto.STRING, number=3,)
    destination_name = proto.Field(proto.STRING, number=4,)
    description = proto.Field(proto.STRING, number=5,)
    price = proto.Field(proto.STRING, number=6,)
    sale_price = proto.Field(proto.STRING, number=7,)
    star_rating = proto.Field(proto.INT64, number=8,)
    category = proto.Field(proto.STRING, number=9,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=10,)
    address = proto.Field(proto.STRING, number=11,)
    android_app_link = proto.Field(proto.STRING, number=12,)
    ios_app_link = proto.Field(proto.STRING, number=13,)
    ios_app_store_id = proto.Field(proto.INT64, number=14,)
    formatted_price = proto.Field(proto.STRING, number=15,)
    formatted_sale_price = proto.Field(proto.STRING, number=16,)
    similar_property_ids = proto.RepeatedField(proto.STRING, number=17,)
class DynamicFlightsAsset(proto.Message):
    r"""A dynamic flights asset.

    Attributes:
        destination_id (str):
            Required. Destination ID which can be any
            sequence of letters and digits, and must be
            unique and match the values of remarketing tag.
            Required.
        origin_id (str):
            Origin ID which can be any sequence of
            letters and digits. The ID sequence (destination
            ID + origin ID) must be unique.
        flight_description (str):
            Required. Flight description, for example,
            Book your ticket. Required.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        destination_name (str):
            Destination name, for example, Paris.
        origin_name (str):
            Origin name, for example, London.
        flight_price (str):
            Flight price which can be number followed by
            the alphabetic currency code, ISO 4217 standard.
            Use '.' as the decimal mark, for example, 100.00
            USD.
        flight_sale_price (str):
            Flight sale price which can be number followed by the
            alphabetic currency code, ISO 4217 standard. Use '.' as the
            decimal mark, for example, 80.00 USD. Must be less than the
            'flight_price' field.
        formatted_price (str):
            Formatted price which can be any characters.
            If set, this attribute will be used instead of
            'price', for example, Starting at $100.00.
        formatted_sale_price (str):
            Formatted sale price which can be any
            characters. If set, this attribute will be used
            instead of 'sale price', for example, On sale
            for $80.00.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
        similar_destination_ids (Sequence[str]):
            Similar destination IDs, for example,
            PAR,LON.
        custom_mapping (str):
            A custom field which can be multiple key to values mapping
            separated by delimiters (",", "|" and ":"), in the forms of
            "<KEY_1>: <VALUE_1>, <VALUE_2>, ... ,<VALUE_N> \| <KEY_2>:
            <VALUE_1>, ... ,<VALUE_N> \| ... \| <KEY_N>: <VALUE_1>, ...
            ,<VALUE_N>" for example, wifi: most \| aircraft: 320, 77W \|
            flights: 42 \| legroom: 32".
    """

    destination_id = proto.Field(proto.STRING, number=1,)
    origin_id = proto.Field(proto.STRING, number=2,)
    flight_description = proto.Field(proto.STRING, number=3,)
    image_url = proto.Field(proto.STRING, number=4,)
    destination_name = proto.Field(proto.STRING, number=5,)
    origin_name = proto.Field(proto.STRING, number=6,)
    flight_price = proto.Field(proto.STRING, number=7,)
    flight_sale_price = proto.Field(proto.STRING, number=8,)
    formatted_price = proto.Field(proto.STRING, number=9,)
    formatted_sale_price = proto.Field(proto.STRING, number=10,)
    android_app_link = proto.Field(proto.STRING, number=11,)
    ios_app_link = proto.Field(proto.STRING, number=12,)
    ios_app_store_id = proto.Field(proto.INT64, number=13,)
    similar_destination_ids = proto.RepeatedField(proto.STRING, number=14,)
    custom_mapping = proto.Field(proto.STRING, number=15,)
class DiscoveryCarouselCardAsset(proto.Message):
    r"""A Discovery Carousel Card asset.

    Attributes:
        marketing_image_asset (str):
            Asset resource name of the associated 1.91:1
            marketing image. This and/or square marketing
            image asset is required.
        square_marketing_image_asset (str):
            Asset resource name of the associated square
            marketing image. This and/or a marketing image
            asset is required.
        portrait_marketing_image_asset (str):
            Asset resource name of the associated 4:5
            portrait marketing image.
        headline (str):
            Required. Headline of the carousel card.
        call_to_action_text (str):
            Call to action text.
    """

    # The image fields hold Asset resource names (strings), not inline
    # image payloads.
    marketing_image_asset = proto.Field(proto.STRING, number=1,)
    square_marketing_image_asset = proto.Field(proto.STRING, number=2,)
    portrait_marketing_image_asset = proto.Field(proto.STRING, number=3,)
    headline = proto.Field(proto.STRING, number=4,)
    call_to_action_text = proto.Field(proto.STRING, number=5,)
class DynamicTravelAsset(proto.Message):
    r"""A dynamic travel asset.

    Attributes:
        destination_id (str):
            Required. Destination ID which can be any
            sequence of letters and digits, and must be
            unique and match the values of remarketing tag.
            Required.
        origin_id (str):
            Origin ID which can be any sequence of
            letters and digits. The ID sequence (destination
            ID + origin ID) must be unique.
        title (str):
            Required. Title, for example, Book your train
            ticket. Required.
        destination_name (str):
            Destination name, for example, Paris.
        destination_address (str):
            Destination address which can be specified in
            one of the following formats. (1) City, state,
            code, country, for example, Mountain View, CA,
            USA. (2) Full address, for example, 123
            Boulevard St, Mountain View, CA 94043. (3)
            Latitude-longitude in the DDD format, for
            example, 41.40338, 2.17403.
        origin_name (str):
            Origin name, for example, London.
        price (str):
            Price which can be a number followed by the
            alphabetic currency code, ISO 4217 standard. Use
            '.' as the decimal mark, for example, 100.00
            USD.
        sale_price (str):
            Sale price which can be a number followed by
            the alphabetic currency code, ISO 4217 standard.
            Use '.' as the decimal mark, for example, 80.00
            USD. Must be less than the 'price' field.
        formatted_price (str):
            Formatted price which can be any characters.
            If set, this attribute will be used instead of
            'price', for example, Starting at $100.00.
        formatted_sale_price (str):
            Formatted sale price which can be any
            characters. If set, this attribute will be used
            instead of 'sale price', for example, On sale
            for $80.00.
        category (str):
            Category, for example, Express.
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, Paris
            trains.
        similar_destination_ids (Sequence[str]):
            Similar destination IDs, for example, NYC.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
    """

    destination_id = proto.Field(proto.STRING, number=1,)
    origin_id = proto.Field(proto.STRING, number=2,)
    title = proto.Field(proto.STRING, number=3,)
    destination_name = proto.Field(proto.STRING, number=4,)
    destination_address = proto.Field(proto.STRING, number=5,)
    origin_name = proto.Field(proto.STRING, number=6,)
    price = proto.Field(proto.STRING, number=7,)
    sale_price = proto.Field(proto.STRING, number=8,)
    formatted_price = proto.Field(proto.STRING, number=9,)
    formatted_sale_price = proto.Field(proto.STRING, number=10,)
    category = proto.Field(proto.STRING, number=11,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=12,)
    similar_destination_ids = proto.RepeatedField(proto.STRING, number=13,)
    image_url = proto.Field(proto.STRING, number=14,)
    android_app_link = proto.Field(proto.STRING, number=15,)
    ios_app_link = proto.Field(proto.STRING, number=16,)
    ios_app_store_id = proto.Field(proto.INT64, number=17,)
# NOTE: proto-plus message definition (generated code). The field tag numbers
# are part of the wire format and must stay in sync with the .proto schema --
# do not renumber or reorder them independently.
class DynamicLocalAsset(proto.Message):
    r"""A dynamic local asset.
    Attributes:
        deal_id (str):
            Required. Deal ID which can be any sequence
            of letters and digits, and must be unique and
            match the values of remarketing tag. Required.
        deal_name (str):
            Required. Deal name, for example, 50% off at
            Mountain View Grocers. Required.
        subtitle (str):
            Subtitle, for example, Groceries.
        description (str):
            Description, for example, Save on your weekly
            bill.
        price (str):
            Price which can be a number followed by the
            alphabetic currency code, ISO 4217 standard. Use
            '.' as the decimal mark, for example, 100.00
            USD.
        sale_price (str):
            Sale price which can be number followed by
            the alphabetic currency code, ISO 4217 standard.
            Use '.' as the decimal mark, for example, 80.00
            USD. Must be less than the 'price' field.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        address (str):
            Address which can be specified in one of the
            following formats. (1) City, state, code,
            country, for example, Mountain View, CA, USA.
            (2) Full address, for example, 123 Boulevard St,
            Mountain View, CA 94043. (3) Latitude-longitude
            in the DDD format, for example, 41.40338,
            2.17403.
        category (str):
            Category, for example, Food.
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, Save
            groceries coupons.
        formatted_price (str):
            Formatted price which can be any characters.
            If set, this attribute will be used instead of
            'price', for example, Starting at $100.00.
        formatted_sale_price (str):
            Formatted sale price which can be any
            characters. If set, this attribute will be used
            instead of 'sale price', for example, On sale
            for $80.00.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        similar_deal_ids (Sequence[str]):
            Similar deal IDs, for example, 1275.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
    """
    # Wire-format field declarations; each `number=` is the protobuf tag.
    deal_id = proto.Field(proto.STRING, number=1,)
    deal_name = proto.Field(proto.STRING, number=2,)
    subtitle = proto.Field(proto.STRING, number=3,)
    description = proto.Field(proto.STRING, number=4,)
    price = proto.Field(proto.STRING, number=5,)
    sale_price = proto.Field(proto.STRING, number=6,)
    image_url = proto.Field(proto.STRING, number=7,)
    address = proto.Field(proto.STRING, number=8,)
    category = proto.Field(proto.STRING, number=9,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=10,)
    formatted_price = proto.Field(proto.STRING, number=11,)
    formatted_sale_price = proto.Field(proto.STRING, number=12,)
    android_app_link = proto.Field(proto.STRING, number=13,)
    similar_deal_ids = proto.RepeatedField(proto.STRING, number=14,)
    ios_app_link = proto.Field(proto.STRING, number=15,)
    ios_app_store_id = proto.Field(proto.INT64, number=16,)
# NOTE: proto-plus message definition (generated code). Field tag numbers are
# part of the wire format -- do not renumber independently of the .proto schema.
class DynamicJobsAsset(proto.Message):
    r"""A dynamic jobs asset.
    Attributes:
        job_id (str):
            Required. Job ID which can be any sequence of
            letters and digits, and must be unique and match
            the values of remarketing tag. Required.
        location_id (str):
            Location ID which can be any sequence of
            letters and digits. The ID sequence (job ID +
            location ID) must be unique.
        job_title (str):
            Required. Job title, for example, Software
            engineer. Required.
        job_subtitle (str):
            Job subtitle, for example, Level II.
        description (str):
            Description, for example, Apply your
            technical skills.
        image_url (str):
            Image URL, for example,
            http://www.example.com/image.png. The image will
            not be uploaded as image asset.
        job_category (str):
            Job category, for example, Technical.
        contextual_keywords (Sequence[str]):
            Contextual keywords, for example, Software
            engineering job.
        address (str):
            Address which can be specified in one of the
            following formats. (1) City, state, code,
            country, for example, Mountain View, CA, USA.
            (2) Full address, for example, 123 Boulevard St,
            Mountain View, CA 94043. (3) Latitude-longitude
            in the DDD format, for example, 41.40338,
            2.17403.
        salary (str):
            Salary, for example, $100,000.
        android_app_link (str):
            Android deep link, for example,
            android-app://com.example.android/http/example.com/gizmos?1234.
        similar_job_ids (Sequence[str]):
            Similar job IDs, for example, 1275.
        ios_app_link (str):
            iOS deep link, for example,
            exampleApp://content/page.
        ios_app_store_id (int):
            iOS app store ID. This is used to check if the user has the
            app installed on their device before deep linking. If this
            field is set, then the ios_app_link field must also be
            present.
    """
    # Wire-format field declarations; each `number=` is the protobuf tag.
    job_id = proto.Field(proto.STRING, number=1,)
    location_id = proto.Field(proto.STRING, number=2,)
    job_title = proto.Field(proto.STRING, number=3,)
    job_subtitle = proto.Field(proto.STRING, number=4,)
    description = proto.Field(proto.STRING, number=5,)
    image_url = proto.Field(proto.STRING, number=6,)
    job_category = proto.Field(proto.STRING, number=7,)
    contextual_keywords = proto.RepeatedField(proto.STRING, number=8,)
    address = proto.Field(proto.STRING, number=9,)
    salary = proto.Field(proto.STRING, number=10,)
    android_app_link = proto.Field(proto.STRING, number=11,)
    similar_job_ids = proto.RepeatedField(proto.STRING, number=12,)
    ios_app_link = proto.Field(proto.STRING, number=13,)
    ios_app_store_id = proto.Field(proto.INT64, number=14,)
# Export every message type registered in this module's proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "9d61e8bb492cb1b3cb46c8bb74a3995a",
"timestamp": "",
"source": "github",
"line_count": 1476,
"max_line_length": 153,
"avg_line_length": 40.639566395663955,
"alnum_prop": 0.6293344891971192,
"repo_name": "googleads/google-ads-python",
"id": "213a8e9c153f2d22b46e81e4dc5f5c00714f992a",
"size": "60584",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/common/types/asset_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
"""
Measure resonators, one at a time, with the readout tone centered in the filterbank bin.
"""
from __future__ import division
import time
import numpy as np
from kid_readout.roach import analog, calculate, hardware_tools, tools
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware, starcryo_temps
from equipment.srs import lockin
from equipment.custom import mmwave_source
from kid_readout.settings import LOCKIN_SERIAL_PORT
# Record the acquisition settings and repository state so the run is traceable.
acquire.show_settings()
acquire.show_git_status()
import logging
logger = acquire.get_script_logger(__file__, level=logging.DEBUG)
# Parameters
suffix = 'test'  # appended to the output directory name
attenuations = [0]  # DAC attenuator settings (dB) to loop over
f_center = 1e6 * np.array([3420.5])  # resonator center frequencies, Hz
fractional_frequency_shift = 0  # fractional correction applied to f_center
f_center *= (1 + fractional_frequency_shift)
df_baseband_target = 15e3  # desired baseband tone spacing, Hz
fine_sweep_num_linewidths = 5  # intended fine-sweep width in resonator linewidths
f_sweep_span = 2e6 # The total span of the baseband tones
coarse_stride = 32  # the coarse sweep uses every Nth fine-sweep tone
f_lo_spacing = 2.5e3 # This is the smallest resolution available
f_baseband_minimum = 100e6 # Keep the tones away from the LO by at least this frequency.
# NOTE(review): sweep_length_seconds appears unused below -- the sweeps are run
# with length_seconds=stream_length_seconds; confirm which was intended.
sweep_length_seconds = 0.01
stream_length_seconds = 10
# Hardware: assemble the equipment whose state is recorded with each measurement.
temperature = starcryo_temps.Temperature()
lock = lockin.SR830(serial_device=LOCKIN_SERIAL_PORT)
lock.identification # This seems to be necessary to wake up the lockin
mmw = mmwave_source.MMWaveSource()
mmw.set_attenuator_ticks(0, 0)
mmw.multiplier_input = 'thermal'
mmw.ttl_modulation_source = "roach_2"
mmw.waveguide_twist_angle = 0
conditioner = analog.HeterodyneMarkII()
hw = hardware.Hardware(temperature, lock, mmw, conditioner)
# ROACH readout interface; initialize=True reprograms it, use_config=False
# ignores the stored configuration.
ri = hardware_tools.r2h11nc_with_mk2(initialize=True, use_config=False)
ri.set_modulation_output('high')  # 'high' TTL level; presumably source unmodulated -- confirm
ri.iq_delay = -1
ri.adc_valon.set_ref_select(0) # internal
# Fail fast if the ADC synthesizer is not phase locked.
assert np.all(ri.adc_valon.get_phase_locks())
# Calculate sweep parameters, LO and baseband sweep frequencies
ri_state = ri.state
# Choose a power-of-two tone length whose bin spacing is closest to the target.
tone_sample_exponent = int(np.round(np.log2(ri_state.adc_sample_rate / df_baseband_target)))
df_baseband = ri_state.adc_sample_rate / 2 ** tone_sample_exponent
num_sweep_tones = int(f_sweep_span / df_baseband)
# Baseband tone comb, offset from the LO by at least f_baseband_minimum.
f_baseband = f_baseband_minimum + ri.state.adc_sample_rate / 2 ** tone_sample_exponent * np.arange(num_sweep_tones)
# Round each LO frequency onto the f_lo_spacing grid so the comb straddles f_center.
f_lo_center = f_lo_spacing * np.round((f_center - f_baseband.mean()) / f_lo_spacing)
logger.info("Fine sweep using {:d} tones spanning {:.1f} MHz with resolution {:.0f} Hz (2^{:d} samples)".format(
    num_sweep_tones, 1e-6 * f_baseband.ptp(), df_baseband, tone_sample_exponent))
logger.info("Coarse sweep using {:d} tones spanning {:.1f} MHz with resolution {:.0f} Hz (2^{:d} samples)".format(
    num_sweep_tones // coarse_stride, 1e-6 * f_baseband.ptp(), coarse_stride * df_baseband, tone_sample_exponent))
# Run
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
    for lo_index, f_lo in enumerate(f_lo_center):
        assert np.all(ri.adc_valon.get_phase_locks())
        tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo, f_lo_spacing=1e-6 * f_lo_spacing)
        for attenuation_index, attenuation in enumerate(attenuations):
            ri.set_dac_attenuator(attenuation)
            #ri.set_tone_baseband_freqs(freqs=1e-6 * np.array([f_baseband[0]]), nsamp=2 ** tone_sample_exponent)
            #time.sleep(1)
            #tools.optimize_fft_gain(ri, fraction_of_maximum=0.5)
            # Fixed FFT gain (automatic optimization above is commented out).
            ri.set_fft_gain(4)
            coarse_state = hw.state()
            coarse_state['lo_index'] = lo_index
            coarse_state['attenuation_index'] = attenuation_index
            # Coarse sweep: every coarse_stride-th tone of the fine comb.
            coarse_sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo + f_baseband[::coarse_stride, np.newaxis]),
                                             num_tone_samples=2 ** tone_sample_exponent,
                                             length_seconds=stream_length_seconds, state=coarse_state,
                                             verbose=True)[0]
            npd.write(coarse_sweep)
            coarse_f_r = coarse_sweep.resonator.f_0
            coarse_Q = coarse_sweep.resonator.Q
            logger.info("Coarse sweep f_r = {:.3f} MHz +/- {:.0f} Hz".format(1e-6 * coarse_f_r,
                                                                             coarse_sweep.resonator.f_0_error))
            logger.info("Coarse sweep Q = {:.0f} +/- {:.0f}".format(coarse_Q, coarse_sweep.resonator.Q_error))
            # NOTE(review): this unconditional raise aborts the run right after
            # the coarse sweep, so the fine sweep and stream code below never
            # executes -- it looks like a debugging leftover; confirm before
            # relying on this script for real acquisitions.
            raise Exception()
            df_filterbank = calculate.stream_sample_rate(ri_state)
            # Center the readout tone comb on a filterbank bin.
            f_baseband_bin_center = df_filterbank * np.round(f_baseband.mean() / df_filterbank)
            f_lo_fine = f_lo_spacing * np.round((coarse_f_r - f_baseband_bin_center) / f_lo_spacing)
            assert np.all(ri.adc_valon.get_phase_locks())
            tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo, f_lo_spacing=1e-6 * f_lo_spacing)
            #fine_indices = np.where(np.abs(f_lo_fine + f_baseband - coarse_f_r) <=
            #                        (fine_sweep_num_linewidths / 2) * (coarse_f_r / coarse_Q))[0]
            # The linewidth-limited selection above is disabled; sweep all tones.
            fine_indices = np.arange(f_baseband.size)
            fine_sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo + f_baseband[fine_indices, np.newaxis]),
                                           num_tone_samples=2 ** tone_sample_exponent,
                                           length_seconds=stream_length_seconds, state=hw.state())[0]
            ri.set_tone_freqs(np.array([]))
            logger.info("Recording {:.1f} s stream with source off".format(stream_length_seconds))
            off_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
            # Modulate the mm-wave source while streaming, then restore 'high'.
            ri.set_modulation_output(7)
            logger.info("Recording {:.1f} s stream with source modulating".format(stream_length_seconds))
            mod_stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True, state=hw.state())[0]
            ri.set_modulation_output('high')
            sweep_stream_list = basic.SingleSweepStreamList(single_sweep=fine_sweep,
                                                            stream_list=[off_stream, mod_stream],
                                                            state={'lo_index': lo_index,
                                                                   'attenuation_index': attenuation_index})
            npd.write(sweep_stream_list)
            npd.write(ri.get_adc_measurement())
finally:
    # Always leave the source line high, attenuate the DAC, and close the file,
    # even if the run is interrupted.
    ri.set_modulation_output('high')
    ri.set_dac_attenuator(62)
    npd.close()
    print("Wrote {}".format(npd.root_path))
    print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
| {
"content_hash": "a361c2d32b4eb149f9bbc54f86a0be45",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 119,
"avg_line_length": 52.504,
"alnum_prop": 0.6286759104068261,
"repo_name": "ColumbiaCMB/kid_readout",
"id": "ab430e62f12956b0171758b77f7ffe141927d127",
"size": "6563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/data_taking_scripts/cooldown/2017-11-23_starcryo/r2h11nc_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13672"
},
{
"name": "Python",
"bytes": "2033932"
}
],
"symlink_target": ""
} |
import warnings
from sympy import Basic, Symbol, Integer
from sympy.core import sympify
from sympy.core.basic import S
from sympy.polys import Poly, roots, cancel
from sympy.simplify import simplify
from sympy.utilities import any
# from sympy.printing import StrPrinter /cyclic/
import random
class NonSquareMatrixException(Exception):
    """Raised when an operation that requires a square matrix is applied
    to a non-square one (e.g. raising a matrix to an integer power)."""
    pass
class ShapeError(ValueError):
    """Raised when a matrix has the wrong shape for an operation."""
class MatrixError(Exception):
    """Generic matrix error, e.g. malformed constructor arguments."""
    pass
def _dims_to_nm(dims):
"""Converts dimensions tuple (or any object with length 1 or 2) or scalar
in dims to matrix dimensions n and m."""
try:
l = len(dims)
except TypeError:
dims = (dims,)
l = 1
# This will work for nd-array too when they are added to sympy.
try:
for dim in dims:
assert (dim > 0)
except AssertionError:
raise ValueError("Matrix dimensions should be positive integers!")
if l == 2:
n, m = map(int, dims)
elif l == 1:
n = m = int(dims[0])
else:
raise ValueError("Matrix dimensions should be a two-element tuple of ints or a single int!")
return n, m
def _iszero(x):
return x == 0
class DeferredVector(object):
    """A named vector whose components are created on demand.

    Indexing with an integer ``i`` yields the Symbol ``'name[i]'``.
    """
    def __init__(self, name):
        self.name = name

    def __getitem__(self, i):
        return Symbol('%s[%d]' % (self.name, i))

    def __str__(self):
        # StrPrinter is injected into this module at import time.
        return StrPrinter.doprint(self)

    def __repr__(self):
        return StrPrinter.doprint(self)
class Matrix(object):
# Added just for numpy compatibility
# TODO: investigate about __array_priority__
__array_priority__ = 10.0
    def __init__(self, *args):
        """
        Matrix can be constructed with values or a rule.

        >>> from sympy import Matrix, I
        >>> Matrix( ((1,2+I), (3,4)) ) #doctest:+NORMALIZE_WHITESPACE
        [1, 2 + I]
        [3, 4]
        >>> Matrix(2, 2, lambda i,j: (i+1)*j ) #doctest:+NORMALIZE_WHITESPACE
        [0, 1]
        [0, 2]

        Accepted argument forms:
          * (rows, cols, callable) -- entry (i, j) is callable(i, j)
          * (rows, cols, flat list/tuple) -- entries in row-major order
          * (Matrix,) -- copy constructor
          * (object with __array__,) -- e.g. a numpy 1D or 2D array
          * (nested sequences,) -- rows; or a flat sequence for a column
          * () -- empty 0 x 0 matrix
        """
        if len(args) == 3 and callable(args[2]):
            # Rule-based construction: entries generated row by row.
            operation = args[2]
            self.rows = int(args[0])
            self.cols = int(args[1])
            self.mat = []
            for i in range(self.rows):
                for j in range(self.cols):
                    self.mat.append(sympify(operation(i, j)))
        elif len(args)==3 and isinstance(args[2], (list, tuple)):
            # Flat-sequence construction: args[2] holds rows*cols entries.
            self.rows=args[0]
            self.cols=args[1]
            mat = args[2]
            if len(mat) != self.rows*self.cols:
                raise MatrixError('List length should be equal to rows*columns')
            self.mat = map(lambda i: sympify(i), mat)
        elif len(args) == 1:
            mat = args[0]
            if isinstance(mat, Matrix):
                # Copy constructor; mat[:] produces a new flat entry list.
                self.rows = mat.rows
                self.cols = mat.cols
                self.mat = mat[:]
                return
            elif hasattr(mat, "__array__"):
                # NumPy array or matrix or some other object that implements
                # __array__. So let's first use this method to get a
                # numpy.array() and then make a python list out of it.
                arr = mat.__array__()
                if len(arr.shape) == 2:
                    self.rows, self.cols = arr.shape[0], arr.shape[1]
                    self.mat = map(lambda i: sympify(i), arr.ravel())
                    return
                elif len(arr.shape) == 1:
                    # A 1D array becomes a single-row matrix.
                    self.rows, self.cols = 1, arr.shape[0]
                    self.mat = [0]*self.cols
                    for i in xrange(len(arr)):
                        self.mat[i] = sympify(arr[i])
                    return
                else:
                    raise NotImplementedError("Sympy supports just 1D and 2D matrices")
            elif not isinstance(mat, (list, tuple)):
                raise TypeError("Matrix constructor doesn't accept %s as input" % str(type(mat)))
            self.rows = len(mat)
            if len(mat) != 0:
                if not isinstance(mat[0], (list, tuple)):
                    # A flat sequence of scalars builds a column vector.
                    self.cols = 1
                    self.mat = map(lambda i: sympify(i), mat)
                    return
                self.cols = len(mat[0])
            else:
                self.cols = 0
            self.mat = []
            for j in xrange(self.rows):
                # Every row must have the same number of columns.
                assert len(mat[j])==self.cols
                for i in xrange(self.cols):
                    self.mat.append(sympify(mat[j][i]))
        elif len(args) == 0:
            # Empty Matrix
            self.rows = self.cols = 0
            self.mat = []
        else:
            # TODO: on 0.7.0 delete this and uncomment the last line
            # Deprecated form: Matrix(1, 2, 3) treated as a column of scalars.
            mat = args
            if not isinstance(mat[0], (list, tuple)):
                # make each element a singleton
                mat = [ [element] for element in mat ]
            warnings.warn("Deprecated constructor, use brackets: Matrix(%s)" % str(mat))
            self.rows=len(mat)
            self.cols=len(mat[0])
            self.mat=[]
            for j in xrange(self.rows):
                assert len(mat[j])==self.cols
                for i in xrange(self.cols):
                    self.mat.append(sympify(mat[j][i]))
            #raise TypeError("Data type not understood")
def key2ij(self,key):
"""Converts key=(4,6) to 4,6 and ensures the key is correct."""
if not (isinstance(key,(list, tuple)) and len(key) == 2):
raise TypeError("wrong syntax: a[%s]. Use a[i,j] or a[(i,j)]"
%repr(key))
i,j=key
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
print self.rows, " ", self.cols
raise IndexError("Index out of range: a[%s]"%repr(key))
return i,j
    def transpose(self):
        """
        Matrix transposition.

        >>> from sympy import Matrix, I
        >>> m=Matrix(((1,2+I),(3,4)))
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [1, 2 + I]
        [3, 4]
        >>> m.transpose() #doctest: +NORMALIZE_WHITESPACE
        [ 1, 3]
        [2 + I, 4]
        >>> m.T == m.transpose()
        True
        """
        a = [0]*self.cols*self.rows
        for i in xrange(self.cols):
            # Column i of self, gathered by a stride-self.cols slice of the
            # flat storage, becomes row i of the result.
            a[i*self.rows:(i+1)*self.rows] = self.mat[i::self.cols]
        return Matrix(self.cols,self.rows,a)

    T = property(transpose,None,None,"Matrix transposition.")
    def conjugate(self):
        """By-element conjugation."""
        out = Matrix(self.rows,self.cols,
                lambda i,j: self[i,j].conjugate())
        return out

    C = property(conjugate,None,None,"By-element conjugation.")

    @property
    def H(self):
        """
        Hermite conjugation.

        >>> from sympy import Matrix, I
        >>> m=Matrix(((1,2+I),(3,4)))
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [1, 2 + I]
        [3, 4]
        >>> m.H #doctest: +NORMALIZE_WHITESPACE
        [ 1, 3]
        [2 - I, 4]
        """
        # Conjugate transpose: transpose first, then conjugate each entry.
        out = self.T.C
        return out

    @property
    def D(self):
        """Dirac conjugation."""
        # Local import avoids a circular import with sympy.physics.
        from sympy.physics.matrices import mgamma
        out = self.H * mgamma(0)
        return out
    def __getitem__(self,key):
        """
        Entry, submatrix, or flat-storage access.

        >>> from sympy import Matrix, I
        >>> m=Matrix(((1,2+I),(3,4)))
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [1, 2 + I]
        [3, 4]
        >>> m[1,0]
        3
        >>> m.H[1,0]
        2 - I
        """
        if type(key) is tuple:
            i, j = key
            if type(i) is slice or type(j) is slice:
                # Any slice in the pair selects a submatrix.
                return self.submatrix(key)
            else:
                # a2idx inlined: accept anything exposing __int__ or __index__.
                try:
                    i = i.__int__()
                except AttributeError:
                    try:
                        i = i.__index__()
                    except AttributeError:
                        raise IndexError("Invalid index a[%r]" % (key,))
                # a2idx inlined
                try:
                    j = j.__int__()
                except AttributeError:
                    try:
                        j = j.__index__()
                    except AttributeError:
                        raise IndexError("Invalid index a[%r]" % (key,))
                if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
                    raise IndexError("Index out of range: a[%s]" % (key,))
                else:
                    # Row-major flat storage: entry (i, j) is at i*cols + j.
                    return self.mat[i*self.cols + j]
        else:
            # row-wise decomposition of matrix
            if type(key) is slice:
                return self.mat[key]
            else:
                k = a2idx(key)
                if k is not None:
                    return self.mat[k]
                raise IndexError("Invalid index: a[%s]" % repr(key))
    def __setitem__(self, key, value):
        """
        Entry, submatrix, or flat-storage assignment.

        >>> from sympy import Matrix, I
        >>> m=Matrix(((1,2+I),(3,4)))
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [1, 2 + I]
        [3, 4]
        >>> m[1,0]=9
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [1, 2 + I]
        [9, 4]
        """
        if type(key) is tuple:
            i, j = key
            if type(i) is slice or type(j) is slice:
                # Slice assignment copies a whole submatrix in.
                if isinstance(value, Matrix):
                    self.copyin_matrix(key, value)
                    return
                if isinstance(value, (list, tuple)):
                    self.copyin_list(key, value)
                    return
            else:
                # a2idx inlined: accept anything exposing __int__ or __index__.
                try:
                    i = i.__int__()
                except AttributeError:
                    try:
                        i = i.__index__()
                    except AttributeError:
                        raise IndexError("Invalid index a[%r]" % (key,))
                # a2idx inlined
                try:
                    j = j.__int__()
                except AttributeError:
                    try:
                        j = j.__index__()
                    except AttributeError:
                        raise IndexError("Invalid index a[%r]" % (key,))
                if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
                    raise IndexError("Index out of range: a[%s]" % (key,))
                else:
                    # Row-major flat storage: entry (i, j) is at i*cols + j.
                    self.mat[i*self.cols + j] = sympify(value)
                    return
        else:
            # row-wise decomposition of matrix
            if type(key) is slice:
                raise IndexError("Vector slices not implemented yet.")
            else:
                k = a2idx(key)
                if k is not None:
                    self.mat[k] = sympify(value)
                    return
        raise IndexError("Invalid index: a[%s]"%repr(key))
    def __array__(self):
        # NumPy interop hook; matrix2numpy is provided elsewhere in the module.
        return matrix2numpy(self)

    def tolist(self):
        """
        Return the Matrix converted in a python list.

        >>> from sympy import Matrix
        >>> m=Matrix(3, 3, range(9))
        >>> m
        [0, 1, 2]
        [3, 4, 5]
        [6, 7, 8]
        >>> m.tolist()
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        """
        ret = [0]*self.rows
        for i in xrange(self.rows):
            # Each contiguous slice of the flat storage is one row.
            ret[i] = self.mat[i*self.cols:(i+1)*self.cols]
        return ret

    def copyin_matrix(self, key, value):
        """Copy the entries of Matrix ``value`` into the submatrix of self
        selected by the (row-slice, column-slice) pair ``key``."""
        rlo, rhi = self.slice2bounds(key[0], self.rows)
        clo, chi = self.slice2bounds(key[1], self.cols)
        # The target region must match the source dimensions exactly.
        assert value.rows == rhi - rlo and value.cols == chi - clo
        for i in range(value.rows):
            for j in range(value.cols):
                self[i+rlo, j+clo] = sympify(value[i,j])

    def copyin_list(self, key, value):
        """Like copyin_matrix, but accepts a plain list/tuple of rows."""
        assert isinstance(value, (list, tuple))
        self.copyin_matrix(key, Matrix(value))

    def hash(self):
        """Compute a hash every time, because the matrix elements
        could change."""
        return hash(self.__str__() )

    @property
    def shape(self):
        """The (rows, cols) dimensions as a tuple."""
        return (self.rows, self.cols)
    def __rmul__(self,a):
        # Matrix product when the left operand is a non-scalar array-like;
        # otherwise scale every entry by a.
        if hasattr(a, "__array__") and a.shape != ():
            return matrix_multiply(a,self)
        out = Matrix(self.rows,self.cols,map(lambda i: a*i,self.mat))
        return out

    def expand(self):
        """Apply expand() to every entry and return the result."""
        out = Matrix(self.rows,self.cols,map(lambda i: i.expand(), self.mat))
        return out

    def combine(self):
        """Apply combine() to every entry and return the result."""
        out = Matrix(self.rows,self.cols,map(lambda i: i.combine(),self.mat))
        return out

    def subs(self, *args):
        """Apply subs(*args) to every entry and return the result."""
        out = Matrix(self.rows,self.cols,map(lambda i: i.subs(*args),self.mat))
        return out

    def __sub__(self,a):
        return self + (-a)

    def __mul__(self,a):
        # Matrix product for array-like right operands, scalar product otherwise.
        if hasattr(a, "__array__") and a.shape != ():
            return matrix_multiply(self,a)
        out = Matrix(self.rows,self.cols,map(lambda i: i*a,self.mat))
        return out

    def __pow__(self, num):
        """Integer matrix power via repeated squaring.

        Negative powers invert first: A**-n == (A**-1)**n.
        """
        if not self.is_square:
            raise NonSquareMatrixException()
        if isinstance(num, int) or isinstance(num, Integer):
            n = int(num)
            if n < 0:
                return self.inv() ** -n # A**-2 = (A**-1)**2
            a = eye(self.cols)
            while n:
                if n % 2:
                    a = a * self
                    n -= 1
                self = self * self
                n = n // 2
            return a
        raise NotImplementedError('Can only raise to the power of an integer for now')

    def __add__(self,a):
        return matrix_add(self,a)

    def __radd__(self,a):
        return matrix_add(a,self)

    def __div__(self,a):
        # Python 2 division operator: divide every entry by the scalar a.
        return self * (S.One/a)

    def __truediv__(self,a):
        return self.__div__(a)

    def multiply(self,b):
        """Returns self*b """
        return matrix_multiply(self,b)

    def add(self,b):
        """Return self+b """
        return matrix_add(self,b)

    def __neg__(self):
        return -1*self

    def __eq__(self, a):
        # Equality is based on hash-of-string; see hash().
        if not isinstance(a, (Matrix, Basic)):
            a = sympify(a)
        if isinstance(a, Matrix):
            return self.hash() == a.hash()
        else:
            return False

    def __ne__(self,a):
        if not isinstance(a, (Matrix, Basic)):
            a = sympify(a)
        if isinstance(a, Matrix):
            return self.hash() != a.hash()
        else:
            return True
    def _format_str(self, strfunc, rowsep='\n'):
        """Render the matrix as text, one bracketed row per line, with
        columns right-justified to a common width.  ``strfunc`` converts
        each entry to a string."""
        # Build table of string representations of the elements
        res = []
        # Track per-column max lengths for pretty alignment
        maxlen = [0] * self.cols
        for i in range(self.rows):
            res.append([])
            for j in range(self.cols):
                string = strfunc(self[i,j])
                res[-1].append(string)
                maxlen[j] = max(len(string), maxlen[j])
        # Patch strings together
        for i, row in enumerate(res):
            for j, elem in enumerate(row):
                # Pad each element up to maxlen so the columns line up
                row[j] = elem.rjust(maxlen[j])
            res[i] = "[" + ", ".join(row) + "]"
        return rowsep.join(res)

    def __str__(self):
        # StrPrinter is injected into this module at import time
        # (see the "/cyclic/" note near the top-of-file imports).
        return StrPrinter.doprint(self)

    def __repr__(self):
        return StrPrinter.doprint(self)
    def inv(self, method="GE", iszerofunc=_iszero, try_block_diag=False):
        """
        Calculates the matrix inverse.

        According to the "method" parameter, it calls the appropriate method:

          GE .... inverse_GE()
          LU .... inverse_LU()
          ADJ ... inverse_ADJ()

        According to the "try_block_diag" parameter, it will try to form block
        diagonal matrices using the method get_diag_blocks(), invert these
        individually, and then reconstruct the full inverse matrix.

        Note, the GE and LU methods may require the matrix to be simplified
        before it is inverted in order to properly detect zeros during
        pivoting. In difficult cases a custom zero detection function can
        be provided by setting the iszerosfunc argument to a function that
        should return True if its argument is zero.
        """
        # Inversion only makes sense for square matrices.
        assert self.cols==self.rows
        if try_block_diag:
            # Invert each diagonal block independently, then reassemble.
            blocks = self.get_diag_blocks()
            r = []
            for block in blocks:
                r.append(block.inv(method=method, iszerofunc=iszerofunc))
            return block_diag(r)
        if method == "GE":
            return self.inverse_GE(iszerofunc=iszerofunc)
        elif method == "LU":
            return self.inverse_LU(iszerofunc=iszerofunc)
        elif method == "ADJ":
            return self.inverse_ADJ()
        else:
            raise ValueError("Inversion method unrecognized")
    def __mathml__(self):
        """Return a MathML <matrix> element built from the entries."""
        mml = ""
        for i in range(self.rows):
            mml += "<matrixrow>"
            for j in range(self.cols):
                mml += self[i,j].__mathml__()
            mml += "</matrixrow>"
        return "<matrix>" + mml + "</matrix>"

    def row(self, i, f):
        """Elementary row operation using functor"""
        # In-place: self[i, j] <- f(self[i, j], j) for every column j.
        for j in range(0, self.cols):
            self[i, j] = f(self[i, j], j)

    def col(self, j, f):
        """Elementary column operation using functor"""
        # In-place: self[i, j] <- f(self[i, j], i) for every row i.
        for i in range(0, self.rows):
            self[i, j] = f(self[i, j], i)

    def row_swap(self, i, j):
        """Swap rows i and j in place."""
        for k in range(0, self.cols):
            self[i, k], self[j, k] = self[j, k], self[i, k]

    def col_swap(self, i, j):
        """Swap columns i and j in place."""
        for k in range(0, self.rows):
            self[k, i], self[k, j] = self[k, j], self[k, i]

    def row_del(self, i):
        """Delete row i in place."""
        self.mat = self.mat[:i*self.cols] + self.mat[(i+1)*self.cols:]
        self.rows -= 1

    def col_del(self, i):
        """
        Delete column i in place.

        >>> import sympy
        >>> M = sympy.matrices.eye(3)
        >>> M.col_del(1)
        >>> M #doctest: +NORMALIZE_WHITESPACE
        [1, 0]
        [0, 0]
        [0, 1]
        """
        # Delete from the bottom row up so earlier removals do not shift
        # the flat-storage indices of entries still to be removed.
        for j in range(self.rows-1, -1, -1):
            del self.mat[i+j*self.cols]
        self.cols -= 1
    def row_join(self, rhs):
        """
        Concatenates two matrices along self's last and rhs's first column

        >>> from sympy import Matrix
        >>> M = Matrix(3,3,lambda i,j: i+j)
        >>> V = Matrix(3,1,lambda i,j: 3+i+j)
        >>> M.row_join(V)
        [0, 1, 2, 3]
        [1, 2, 3, 4]
        [2, 3, 4, 5]
        """
        # Both operands must have the same number of rows.
        assert self.rows == rhs.rows
        newmat = self.zeros((self.rows, self.cols + rhs.cols))
        # Left block is self, right block is rhs.
        newmat[:,:self.cols] = self[:,:]
        newmat[:,self.cols:] = rhs
        return newmat

    def col_join(self, bott):
        """
        Concatenates two matrices along self's last and bott's first row

        >>> from sympy import Matrix
        >>> M = Matrix(3,3,lambda i,j: i+j)
        >>> V = Matrix(1,3,lambda i,j: 3+i+j)
        >>> M.col_join(V)
        [0, 1, 2]
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]
        """
        # Both operands must have the same number of columns.
        assert self.cols == bott.cols
        newmat = self.zeros((self.rows+bott.rows, self.cols))
        # Top block is self, bottom block is bott.
        newmat[:self.rows,:] = self[:,:]
        newmat[self.rows:,:] = bott
        return newmat
def row_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((1, 3))
>>> V
[0, 0, 0]
>>> M.row_insert(1,V)
[0, 1, 2]
[0, 0, 0]
[1, 2, 3]
[2, 3, 4]
"""
if pos is 0:
return mti.col_join(self)
assert self.cols == mti.cols
newmat = self.zeros((self.rows + mti.rows, self.cols))
newmat[:pos,:] = self[:pos,:]
newmat[pos:pos+mti.rows,:] = mti[:,:]
newmat[pos+mti.rows:,:] = self[pos:,:]
return newmat
def col_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((3, 1))
>>> V
[0]
[0]
[0]
>>> M.col_insert(1,V)
[0, 0, 1, 2]
[1, 0, 2, 3]
[2, 0, 3, 4]
"""
if pos is 0:
return mti.row_join(self)
assert self.rows == mti.rows
newmat = self.zeros((self.rows, self.cols + mti.cols))
newmat[:,:pos] = self[:,:pos]
newmat[:,pos:pos+mti.cols] = mti[:,:]
newmat[:,pos+mti.cols:] = self[:,pos:]
return newmat
    def trace(self):
        """Sum of the diagonal entries (square matrices only)."""
        assert self.cols == self.rows
        trace = 0
        for i in range(self.cols):
            trace += self[i,i]
        return trace

    def submatrix(self, keys):
        """
        Return the submatrix selected by a (row-slice, column-slice) pair.

        >>> from sympy import Matrix
        >>> m = Matrix(4,4,lambda i,j: i+j)
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [0, 1, 2, 3]
        [1, 2, 3, 4]
        [2, 3, 4, 5]
        [3, 4, 5, 6]
        >>> m[0:1, 1] #doctest: +NORMALIZE_WHITESPACE
        [1]
        >>> m[0:2, 0:1] #doctest: +NORMALIZE_WHITESPACE
        [0]
        [1]
        >>> m[2:4, 2:4] #doctest: +NORMALIZE_WHITESPACE
        [4, 5]
        [5, 6]
        """
        assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
        rlo, rhi = self.slice2bounds(keys[0], self.rows)
        clo, chi = self.slice2bounds(keys[1], self.cols)
        if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
            raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
        outLines, outCols = rhi-rlo, chi-clo
        outMat = [0]*outLines*outCols
        for i in xrange(outLines):
            # Copy one contiguous row segment from the flat storage.
            outMat[i*outCols:(i+1)*outCols] = self.mat[(i+rlo)*self.cols+clo:(i+rlo)*self.cols+chi]
        return Matrix(outLines,outCols,outMat)
def slice2bounds(self, key, defmax):
"""
Takes slice or number and returns (min,max) for iteration
Takes a default maxval to deal with the slice ':' which is (none, none)
"""
if isinstance(key, slice):
lo, hi = 0, defmax
if key.start != None:
if key.start >= 0:
lo = key.start
else:
lo = defmax+key.start
if key.stop != None:
if key.stop >= 0:
hi = key.stop
else:
hi = defmax+key.stop
return lo, hi
elif isinstance(key, int):
if key >= 0:
return key, key+1
else:
return defmax+key, defmax+key+1
else:
raise IndexError("Improper index type")
    def applyfunc(self, f):
        """
        Return a new matrix with ``f`` applied to every entry.

        >>> from sympy import Matrix
        >>> m = Matrix(2,2,lambda i,j: i*2+j)
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [0, 1]
        [2, 3]
        >>> m.applyfunc(lambda i: 2*i) #doctest: +NORMALIZE_WHITESPACE
        [0, 2]
        [4, 6]
        """
        assert callable(f)
        out = Matrix(self.rows,self.cols,map(f,self.mat))
        return out

    def evalf(self, prec=None, **options):
        """Numerically evaluate every entry, optionally at precision ``prec``."""
        if prec is None:
            return self.applyfunc(lambda i: i.evalf(**options))
        else:
            return self.applyfunc(lambda i: i.evalf(prec, **options))
def reshape(self, _rows, _cols):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,3,lambda i,j: 1)
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1]
[1, 1, 1]
>>> m.reshape(1,6) #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1, 1, 1, 1]
>>> m.reshape(3,2) #doctest: +NORMALIZE_WHITESPACE
[1, 1]
[1, 1]
[1, 1]
"""
if self.rows*self.cols != _rows*_cols:
print "Invalid reshape parameters %d %d" % (_rows, _cols)
return Matrix(_rows, _cols, lambda i,j: self.mat[i*_cols + j])
    def print_nonzero (self, symb="X"):
        """
        Shows location of non-zero entries for fast shape lookup

        >>> from sympy import Matrix, matrices
        >>> m = Matrix(2,3,lambda i,j: i*3+j)
        >>> m #doctest: +NORMALIZE_WHITESPACE
        [0, 1, 2]
        [3, 4, 5]
        >>> m.print_nonzero() #doctest: +NORMALIZE_WHITESPACE
        [ XX]
        [XXX]
        >>> m = matrices.eye(4)
        >>> m.print_nonzero("x") #doctest: +NORMALIZE_WHITESPACE
        [x ]
        [ x ]
        [ x ]
        [ x]
        """
        # Build one bracketed line per row: a space for zero entries,
        # symb for non-zero ones.
        s="";
        for i in range(self.rows):
            s+="["
            for j in range(self.cols):
                if self[i,j] == 0:
                    s+=" "
                else:
                    s+= symb+""
            s+="]\n"
        # Python 2 print statement (this module predates Python 3).
        print s
def LUsolve(self, rhs, iszerofunc=_iszero):
"""
Solve the linear system Ax = b for x.
self is the coefficient matrix A and rhs is the right side b.
This is for symbolic matrices, for real or complex ones use
sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.
"""
assert rhs.rows == self.rows
A, perm = self.LUdecomposition_Simple(iszerofunc=_iszero)
n = self.rows
b = rhs.permuteFwd(perm)
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
# backward substitution
for i in range(n-1,-1,-1):
for j in range(i+1, n):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
b.row(i, lambda x,k: x / A[i,i])
return b
def LUdecomposition(self, iszerofunc=_iszero):
"""
Returns the decomposition LU and the row swaps p.
"""
combined, p = self.LUdecomposition_Simple(iszerofunc=_iszero)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i,j] = combined[i,j]
else:
if i == j:
L[i,i] = 1
U[i,j] = combined[i,j]
return L, U, p
    def LUdecomposition_Simple(self, iszerofunc=_iszero):
        """
        Returns A comprised of L,U (L's diag entries are 1) and
        p which is the list of the row swaps (in order).
        """
        assert self.rows == self.cols
        n = self.rows
        # Work on a copy so self is left untouched.
        A = self[:,:]
        p = []
        # factorization
        for j in range(n):
            for i in range(j):
                for k in range(i):
                    A[i,j] = A[i,j] - A[i,k]*A[k,j]
            pivot = -1
            for i in range(j,n):
                for k in range(j):
                    A[i,j] = A[i,j] - A[i,k]*A[k,j]
                # find the first non-zero pivot, includes any expression
                if pivot == -1 and not iszerofunc(A[i,j]):
                    pivot = i
            if pivot < 0:
                raise ValueError("Error: non-invertible matrix passed to LUdecomposition_Simple()")
            if pivot != j: # row must be swapped
                A.row_swap(pivot,j)
                p.append([pivot,j])
            assert not iszerofunc(A[j,j])
            # Scale the sub-diagonal entries of column j by the pivot.
            scale = 1 / A[j,j]
            for i in range(j+1,n):
                A[i,j] = A[i,j] * scale
        return A, p
def LUdecompositionFF(self):
    """
    Returns 4 matrices P, L, D, U such that PA = L D**-1 U.

    Fraction-free LU decomposition; from the paper
    "fraction-free matrix factors..." by Zhou and Jeffrey.

    Raises ValueError when the matrix is not full rank.
    """
    n, m = self.rows, self.cols
    U, L, P = self[:,:], eye(n), eye(n)
    DD = zeros(n)  # store it smarter since it's just diagonal
    oldpivot = 1
    for k in range(n-1):
        if U[k, k] == 0:
            # search below row k for a usable (nonzero) pivot
            kpivot = k + 1
            Notfound = True
            while kpivot < n and Notfound:
                if U[kpivot, k] != 0:
                    Notfound = False
                else:
                    kpivot = kpivot + 1
            # Bug fix: the search loop leaves kpivot <= n, so the original
            # test ``kpivot == n+1`` could never be true and rank
            # deficiency went undetected; test the search flag instead.
            if Notfound:
                raise ValueError("Matrix is not full rank")
            # swap rows k and kpivot in both U and P (columns k..end)
            swap = U[k, k:]
            U[k, k:] = U[kpivot, k:]
            U[kpivot, k:] = swap
            swap = P[k, k:]
            P[k, k:] = P[kpivot, k:]
            P[kpivot, k:] = swap
        assert U[k, k] != 0
        L[k, k] = U[k, k]
        DD[k, k] = oldpivot * U[k, k]
        assert DD[k, k] != 0
        Ukk = U[k, k]
        for i in range(k+1, n):
            L[i, k] = U[i, k]
            Uik = U[i, k]
            for j in range(k+1, m):
                # fraction-free update: division by the previous pivot
                # is exact by construction
                U[i, j] = (Ukk * U[i, j] - U[k, j]*Uik) / oldpivot
            U[i, k] = 0
        oldpivot = U[k, k]
    DD[n-1, n-1] = oldpivot
    return P, L, DD, U
def cofactorMatrix(self, method="berkowitz"):
out = Matrix(self.rows, self.cols, lambda i,j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
assert 0 <= i < self.rows and 0 <= j < self.cols
return self.minorMatrix(i,j).det(method)
def minorMatrix(self, i, j):
assert 0 <= i < self.rows and 0 <= j < self.cols
return self.delRowCol(i,j)
def cofactor(self, i, j, method="berkowitz"):
if (i+j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1 * self.minorEntry(i, j, method)
def jacobian(self, X):
"""
Calculates the Jacobian matrix (derivative of a vectorial function).
*self*
A vector of expressions representing functions f_i(x_1, ..., x_n).
*X*
The set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(jacobian() should always work).
Examples::
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
[ 2*rho, 0]
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
"""
if not isinstance(X, Matrix):
X = Matrix(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
assert len(self.shape) == 2
assert len(X.shape) == 2
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return Matrix(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
"""
Return Q,R where A = Q*R, Q is orthogonal and R is upper triangular.
Assumes full-rank square (for now).
"""
assert self.rows == self.cols
n = self.rows
Q, R = self.zeros(n), self.zeros(n)
for j in range(n): # for each column vector
tmp = self[:,j] # take original v
for i in range(j):
# subtract the project of self on new vector
tmp -= Q[:,i] * self[:,j].dot(Q[:,i])
tmp.expand()
# normalize it
R[j,j] = tmp.norm()
Q[:,j] = tmp / R[j,j]
assert Q[:,j].norm() == 1
for i in range(j):
R[i,j] = Q[:,i].dot(self[:,j])
return Q,R
def QRsolve(self, b):
"""
Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
However, LUsolve usually uses an exact arithmetic, so you don't need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use sympy.mpmath.qr_solve.
"""
Q, R = self.QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n-1, -1, -1):
tmp = y[j,:]
for k in range(j+1, n):
tmp -= R[j,k] * x[n-1-k]
x.append(tmp/R[j,j])
return Matrix([row.mat for row in reversed(x)])
# Utility functions
def simplify(self):
"""Simplify the elements of a matrix in place."""
for i in xrange(len(self.mat)):
self.mat[i] = simplify(self.mat[i])
#def evaluate(self): # no more eval() so should be removed
# for i in range(self.rows):
# for j in range(self.cols):
# self[i,j] = self[i,j].eval()
def cross(self, b):
    """
    Return the 3D cross product self x b as a 1 x 3 Matrix.

    Both operands must be 3-component vectors: a 1 x 3 / 3 x 1 Matrix,
    or (for b) a plain list/tuple of length 3.

    Raises ValueError when either operand has the wrong shape.
    """
    assert isinstance(b, (list, tuple, Matrix))

    def _is_3vec(v):
        # one-line helper: accept 1x3/3x1 matrices and length-3 sequences
        if isinstance(v, Matrix):
            return (v.rows == 1 and v.cols == 3) or \
                   (v.rows == 3 and v.cols == 1)
        return len(v) == 3

    # Bug fix: the original condition raised only when self was NOT a
    # 3-vector while b WAS one; when both had wrong shapes it silently
    # fell through and returned garbage.  Require both to be 3-vectors.
    if not (_is_3vec(self) and _is_3vec(b)):
        raise ValueError("Dimensions incorrect for cross product")
    return Matrix(1, 3, ((self[1]*b[2] - self[2]*b[1]),
                         (self[2]*b[0] - self[0]*b[2]),
                         (self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
    """Return the dot product of self with b.

    b may be a list, a tuple, or a Matrix of the same total length;
    self must contain exactly as many entries as b.
    """
    assert isinstance(b, (list, tuple, Matrix))
    if isinstance(b, (list, tuple)):
        length = len(b)
    else:
        length = b.rows * b.cols
    assert self.cols * self.rows == length
    # accumulate element-wise products over the flattened operands
    return sum(self[idx] * b[idx] for idx in range(length))
def norm(self):
assert self.rows == 1 or self.cols == 1
out = sympify(0)
for i in range(self.rows * self.cols):
out += self[i]*self[i]
return out**S.Half
def normalized(self):
assert self.rows == 1 or self.cols == 1
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Project onto v."""
return v * (self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
    """Return a copy of self with the row swaps in ``perm`` undone.

    ``perm`` is a list of [i, j] swap pairs as produced by the LU
    routines; they are replayed in reverse order on a copy.
    """
    result = self[:, :]
    for a, b in reversed(perm):
        result.row_swap(a, b)
    return result
def permuteFwd(self, perm):
    """Return a copy of self with the row swaps in ``perm`` applied.

    ``perm`` is a list of [i, j] swap pairs as produced by the LU
    routines; they are replayed in order on a copy.
    """
    result = self[:, :]
    for a, b in perm:
        result.row_swap(a, b)
    return result
def delRowCol(self, i, j):
# used only for cofactors, makes a copy
M = self[:,:]
M.row_del(i)
M.col_del(j)
return M
def zeronm(self, n, m):
# used so that certain functions above can use this
# then only this func need be overloaded in subclasses
warnings.warn( 'Deprecated: use zeros() instead.' )
return Matrix(n,m,[S.Zero]*n*m)
def zero(self, n):
"""Returns a n x n matrix of zeros."""
warnings.warn( 'Deprecated: use zeros() instead.' )
return Matrix(n,n,[S.Zero]*n*n)
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return Matrix(n,m,[S.Zero]*n*m)
def eye(self, n):
"""Returns the identity matrix of size n."""
tmp = self.zeros(n)
for i in range(tmp.rows):
tmp[i,i] = S.One
return tmp
@property
def is_square(self):
return self.rows == self.cols
def is_upper(self):
for i in range(self.cols):
for j in range(self.rows):
if i > j and self[i,j] != 0:
return False
return True
def is_lower(self):
for i in range(self.cols):
for j in range(self.rows):
if i < j and self[i, j] != 0:
return False
return True
def is_symbolic(self):
for i in range(self.cols):
for j in range(self.rows):
if self[i,j].atoms(Symbol):
return True
return False
def clone(self):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j])
def det(self, method="bareis"):
"""
Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
"""
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
else:
raise ValueError("Determinant method unrecognized")
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF).
"""
if not self.is_square:
raise NonSquareMatrixException()
M, n = self[:,:], self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
D /= M[k-1, k-1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign * M[n-1, n-1]
return det.expand()
def adjugate(self, method="berkowitz"):
"""
Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See also: .cofactorMatrix(), .T
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
    """
    Calculates the inverse using LU decomposition.

    ``iszerofunc`` is forwarded to the underlying LU solve so callers
    can control zero detection for symbolic entries.
    """
    # Bug fix: pass the caller's iszerofunc through; the original
    # hard-coded _iszero, silently ignoring the argument.
    return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
"""
Calculates the inverse using Gaussian elimination.
"""
assert self.rows == self.cols
assert self.det() != 0
big = self.row_join(self.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc)
return red[0][:,big.rows:]
def inverse_ADJ(self):
"""
Calculates the inverse using the adjugate matrix and a determinant.
"""
assert self.rows == self.cols
d = self.berkowitz_det()
assert d != 0
return self.adjugate()/d
def rref(self,simplified=False, iszerofunc=_iszero):
"""
Take any matrix and return reduced row-echelon form and indices of pivot vars
To simplify elements before finding nonzero pivots set simplified=True
"""
# TODO: rewrite inverse_GE to use this
pivots, r = 0, self[:,:] # pivot: index of next row to contain a pivot
pivotlist = [] # indices of pivot variables (non-free)
for i in range(r.cols):
if pivots == r.rows:
break
if simplified:
r[pivots,i] = simplify(r[pivots,i])
if iszerofunc(r[pivots,i]):
for k in range(pivots, r.rows):
if simplified and k>pivots:
r[k,i] = simplify(r[k,i])
if not iszerofunc(r[k,i]):
break
if k == r.rows - 1 and iszerofunc(r[k,i]):
continue
r.row_swap(pivots,k)
scale = r[pivots,i]
r.row(pivots, lambda x, _: x/scale)
for j in range(r.rows):
if j == pivots:
continue
scale = r[j,i]
r.row(j, lambda x, k: x - r[pivots,k]*scale)
pivotlist.append(i)
pivots += 1
return r, pivotlist
def nullspace(self,simplified=False):
"""
Returns list of vectors (Matrix objects) that span nullspace of self
"""
reduced, pivots = self.rref(simplified)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros((self.cols, 1)))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i,0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i+1, self.cols):
line = pivots.index(i)
if reduced[line, j] != 0:
assert j not in pivots
basis[basiskey.index(j)][i,0] = -1 * reduced[line, j]
return basis
def berkowitz(self):
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently
coefficients of characteristic polynomials of 'self' and all
its square sub-matrices composed by removing both i-th row
and column, without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial, when 'self'
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and
I is N x N identity matrix, then the following following
definition of characteristic polynomial is begin used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([ [x,y,z], [1,0,0], [y,z,x] ])
>>> p, q, r = M.berkowitz()
>>> print p # 1 x 1 M's sub-matrix
(1, -x)
>>> print q # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> print r # 3 x 3 M's sub-matrix
(1, -2*x, -y - y*z + x**2, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
if not self.is_square:
raise NonSquareMatrixException()
A, N = self, self.rows
transforms = [0] * (N-1)
for n in xrange(N, 1, -1):
T, k = zeros((n+1,n)), n - 1
R, C = -A[k,:k], A[:k,k]
A, a = A[:k,:k], -A[k,k]
items = [ C ]
for i in xrange(0, n-2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0,0]
items = [ S.One, a ] + items
for i in xrange(n):
T[i:,i] = items[:n-i+1]
transforms[k-1] = T
polys = [ Matrix([S.One, -A[0,0]]) ]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return tuple(map(tuple, polys))
def berkowitz_det(self):
    """Computes determinant using Berkowitz method.

    The determinant is, up to sign, the constant coefficient of the
    characteristic polynomial returned by berkowitz().
    """
    charpoly_coeffs = self.berkowitz()[-1]
    parity = (-1) ** (len(charpoly_coeffs) - 1)
    return parity * charpoly_coeffs[-1]
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method."""
sign, minors = S.NegativeOne, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x):
"""Computes characteristic polynomial minors using Berkowitz method."""
coeffs, monoms = self.berkowitz()[-1], range(self.rows+1)
return Poly(dict(zip(reversed(monoms), coeffs)), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method. """
return roots(self.berkowitz_charpoly(Symbol('x', dummy=True)), **flags)
eigenvals = berkowitz_eigenvals
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis)."""
if 'multiple' in flags:
del flags['multiple']
out, vlist = [], self.eigenvals(**flags)
for r, k in vlist.iteritems():
tmp = self - eye(self.rows)*r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplified=True)
out.append((r, k, basis))
return out
def fill(self, value):
"""Fill the matrix with the scalar value."""
self.mat = [value] * self.rows * self.cols
def __getattr__(self, attr):
if attr in ('diff','integrate','limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc( item_doit )
return doit
else:
raise AttributeError()
def vec(self):
"""
Return the Matrix converted into a one column matrix by stacking columns
>>> from sympy import Matrix
>>> m=Matrix([ [1,3], [2,4] ])
>>> m
[1, 3]
[2, 4]
>>> m.vec()
[1]
[2]
[3]
[4]
"""
return Matrix(self.cols*self.rows, 1, self.transpose().mat)
def vech(self, diagonal=True, check_symmetry=True):
"""
Return the unique elements of a symmetric Matrix as a one column matrix
by stacking
the elements in the lower triangle
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
>>> from sympy import Matrix
>>> m=Matrix([ [1,2], [2,3] ])
>>> m
[1, 2]
[2, 3]
>>> m.vech()
[1]
[2]
[3]
>>> m.vech(diagonal=False)
[2]
"""
c = self.cols
if c != self.rows:
raise TypeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros( (c * (c + 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j,c):
v[count] = self[i,j]
count += 1
else:
v = zeros( (c * (c - 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j+1,c):
v[count] = self[i,j]
count += 1
return v
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Example:
>>> from sympy import Matrix, symbols
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
[1, 3]
[y, z**2]
>>> a2
[x]
>>> a3
[0]
>>>
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[0:i, i:]
to_the_bottom = M[i:, 0:i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[0:i, 0:i])
if M.shape == M[0:i, 0:i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def matrix_multiply(A, B):
"""
Matrix product A*B.
A and B must be of appropriate dimensions. If A is a m x k matrix, and B
is a k x n matrix, the product will be an m x n matrix.
Example:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
[30, 36, 42]
[66, 81, 96]
>>> B*A
Traceback (most recent call last):
...
ShapeError
>>>
"""
# The following implmentation is equivalent, but about 5% slower
#ma, na = A.shape
#mb, nb = B.shape
#
#if na != mb:
# raise ShapeError()
#product = Matrix(ma, nb, lambda i,j: 0)
#for i in xrange(ma):
# for j in xrange(nb):
# s = 0
# for k in range(na):
# s += A[i, k]*B[k, j]
# product[i, j] = s
#return product
if A.shape[1] != B.shape[0]:
raise ShapeError()
blst = B.T.tolist()
alst = A.tolist()
return Matrix(A.shape[0], B.shape[1], lambda i, j:
reduce(lambda k, l: k+l,
map(lambda n, m: n*m,
alst[i],
blst[j])))
def matrix_add(A,B):
"""Return A+B"""
if A.shape != B.shape:
raise ShapeError()
alst = A.tolist()
blst = B.tolist()
ret = [0]*A.shape[0]
for i in xrange(A.shape[0]):
ret[i] = map(lambda j,k: j+k, alst[i], blst[i])
return Matrix(ret)
def zero(n):
"""Create square zero matrix n x n"""
warnings.warn( 'Deprecated: use zeros() instead.' )
return zeronm(n,n)
def zeronm(n,m):
"""Create zero matrix n x m"""
warnings.warn( 'Deprecated: use zeros() instead.' )
assert n>0
assert m>0
return Matrix(n,m,[S.Zero]*m*n)
def zeros(dims):
"""Create zero matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm(dims)
return Matrix(n, m, [S.Zero]*m*n)
def one(n):
"""Create square all-one matrix n x n"""
warnings.warn( 'Deprecated: use ones() instead.' )
return Matrix(n,n,[S.One]*n*n)
def ones(dims):
"""Create all-one matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm( dims )
return Matrix(n, m, [S.One]*m*n)
def eye(n):
"""Create square identity matrix n x n"""
n = int(n)
out = zeros(n)
for i in range(n):
out[i, i] = S.One
return out
def randMatrix(r, c, min=0, max=99, seed=None):
    """Create a random r x c matrix of integers in [min, max].

    When ``seed`` is None the system PRNG state (time-based) is used;
    otherwise ``seed`` initializes a deterministic generator.

    NOTE: ``min``/``max`` shadow the builtins; kept for backward
    compatibility with existing keyword callers.
    """
    # Bug fix: the original default was the mutable ``seed=[]`` —
    # a classic shared-default pitfall.  None is the sentinel now;
    # an explicit empty list is still accepted for compatibility.
    if seed is None or seed == []:
        prng = random.Random()  # seeded from system time
    else:
        prng = random.Random(seed)
    return Matrix(r, c, lambda i, j: prng.randint(min, max))
def hessian(f, varlist):
    """Compute Hessian matrix for a scalar function f.

    see: http://en.wikipedia.org/wiki/Hessian_matrix

    ``varlist`` may be a list/tuple of symbols or a 1 x m Matrix.
    Returns the symmetric m x m matrix of second partial derivatives.
    """
    # f is the expression representing a function f, return regular matrix
    if isinstance(varlist, (list, tuple)):
        m = len(varlist)
    elif isinstance(varlist, Matrix):
        m = varlist.cols
        assert varlist.rows == 1
    else:
        raise ValueError("Improper variable list in hessian function")
    assert m > 0
    try:
        f.diff(varlist[0])   # check differentiability
    except AttributeError:
        # Bug fix: the original message was "Function %d ... % i" where
        # ``i`` was undefined here, raising NameError instead of the
        # intended diagnostic.
        raise ValueError("Function %s is not differentiable" % f)
    out = zeros(m)
    # fill the upper triangle (including the diagonal) ...
    for i in range(m):
        for j in range(i, m):
            out[i, j] = f.diff(varlist[i]).diff(varlist[j])
    # ... then mirror it, since the Hessian is symmetric
    for i in range(m):
        for j in range(i):
            out[i, j] = out[j, i]
    return out
def GramSchmidt(vlist, orthog=False):
    """Apply Gram-Schmidt orthogonalization to the vectors in vlist.

    Returns a list of mutually orthogonal vectors spanning the same
    space; with ``orthog=True`` they are additionally normalized.

    Raises ValueError if the input vectors are linearly dependent.
    """
    out = []
    m = len(vlist)
    for i in range(m):
        tmp = vlist[i]
        for j in range(i):
            # remove the component of vlist[i] along each prior basis vector
            tmp -= vlist[i].project(out[j])
        # Bug fix: the original compared against the hard-coded
        # Matrix([[0,0,0]]), so dependence was only ever detected for
        # 1x3 row vectors; compare against a zero matrix of the same
        # shape instead.
        if tmp == zeros((tmp.rows, tmp.cols)):
            raise ValueError("GramSchmidt: vector set not linearly independent")
        out.append(tmp)
    if orthog:
        for i in range(len(out)):
            out[i] = out[i].normalized()
    return out
def wronskian(functions, var, method='bareis'):
    """Compute Wronskian for [] of functions

                       | f1     f2      ... fn    |
                       | f1'    f2'     ... fn'   |
        W(f1,...,fn) = | .      .       ... .     |
                       | n      n       ... n     |
                       | D(f1)  D(f2)   ... D(fn) |

    see: http://en.wikipedia.org/wiki/Wronskian
    """
    # Bug fix: build a fresh sympified list instead of overwriting the
    # caller's list elements in place (the original mutated ``functions``).
    functions = [sympify(f) for f in functions]
    n = len(functions)
    if n == 0:
        return 1
    # row i holds the i-th derivatives of every function
    W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))
    return W.det(method)
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), do not vanish for n = 0.
Casoratian is defined by k x k determinant:
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis.
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
seqs = map(sympify, seqs)
if not zero:
f = lambda i, j: seqs[j].subs(n, n+i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def block_diag(matrices):
"""
Constructs a block diagonal matrix from a list of square matrices.
Example:
>>> from sympy import block_diag, symbols, Matrix
>>> from sympy.abc import a, b, c, x, y, z
>>> a = Matrix([[1, 2], [2, 3]])
>>> b = Matrix([[3, x], [y, 3]])
>>> block_diag([a, b, b])
[1, 2, 0, 0, 0, 0]
[2, 3, 0, 0, 0, 0]
[0, 0, 3, x, 0, 0]
[0, 0, y, 3, 0, 0]
[0, 0, 0, 0, 3, x]
[0, 0, 0, 0, y, 3]
"""
rows = 0
for m in matrices:
assert m.rows == m.cols, "All matrices must be square."
rows += m.rows
A = zeros((rows, rows))
i = 0
for m in matrices:
A[i+0:i+m.rows, i+0:i+m.cols] = m
i += m.rows
return A
class SMatrix(Matrix):
"""Sparse matrix"""
def __init__(self, *args):
if len(args) == 3 and callable(args[2]):
op = args[2]
assert isinstance(args[0], int) and isinstance(args[1], int)
self.rows = args[0]
self.cols = args[1]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(op(i,j))
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], (list, tuple)):
self.rows = args[0]
self.cols = args[1]
mat = args[2]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(mat[i*self.cols+j])
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], dict):
self.rows = args[0]
self.cols = args[1]
self.mat = {}
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
self.mat[key] = args[2][key]
else:
if len(args) == 1:
mat = args[0]
else:
mat = args
if not isinstance(mat[0], (list, tuple)):
mat = [ [element] for element in mat ]
self.rows = len(mat)
self.cols = len(mat[0])
self.mat = {}
for i in range(self.rows):
assert len(mat[i]) == self.cols
for j in range(self.cols):
value = sympify(mat[i][j])
if value != 0:
self.mat[(i,j)] = value
def __getitem__(self, key):
    """Sparse element access.

    Supports a flat int/slice index (row-major), an (int, int) pair,
    and (slice, ...) submatrix extraction.  Missing entries read as 0.
    """
    if isinstance(key, slice) or isinstance(key, int):
        lo, hi = self.slice2bounds(key, self.rows*self.cols)
        L = []
        for idx in range(lo, hi):
            m, n = self.rowdecomp(idx)
            # Consistency/py3 fix: dict.has_key() was removed in
            # Python 3; use the ``in`` operator like the rest of
            # this class already does.
            if (m, n) in self.mat:
                L.append(self.mat[(m, n)])
            else:
                L.append(0)   # absent key means a stored zero
        if len(L) == 1:
            return L[0]
        else:
            return L
    assert len(key) == 2
    if isinstance(key[0], int) and isinstance(key[1], int):
        i, j = self.key2ij(key)
        if (i, j) in self.mat:
            return self.mat[(i, j)]
        else:
            return 0
    elif isinstance(key[0], slice) or isinstance(key[1], slice):
        return self.submatrix(key)
    else:
        raise IndexError("Index out of range: a[%s]" % repr(key))
def rowdecomp(self, num):
assert (0 <= num < self.rows * self.cols) or \
(0 <= -1*num < self.rows * self.cols)
i, j = 0, num
while j >= self.cols:
j -= self.cols
i += 1
return i,j
def __setitem__(self, key, value):
    """Set an entry or a slice.

    Zero values are removed from the backing dict so the matrix
    stays sparse.
    """
    # almost identical to Matrix.__setitem__, but must test for 0
    assert len(key) == 2
    if isinstance(key[0], slice) or isinstance(key[1], slice):
        if isinstance(value, Matrix):
            self.copyin_matrix(key, value)
        # Fix: this was a second independent ``if`` — value can never
        # be both a Matrix and a list/tuple, so make the branches
        # mutually exclusive.
        elif isinstance(value, (list, tuple)):
            self.copyin_list(key, value)
    else:
        i, j = self.key2ij(key)
        testval = sympify(value)
        if testval != 0:
            self.mat[(i, j)] = testval
        # py3 fix: has_key() -> ``in`` (consistent with __getitem__)
        elif (i, j) in self.mat:
            del self.mat[(i, j)]
def row_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if i==k:
pass
elif i > k:
newD[i-1,j] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.rows -= 1
def col_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if j==k:
pass
elif j > k:
newD[i,j-1] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.cols -= 1
def toMatrix(self):
l = []
for i in range(self.rows):
c = []
l.append(c)
for j in range(self.cols):
if (i, j) in self.mat:
c.append(self[i, j])
else:
c.append(0)
return Matrix(l)
# from here to end all functions are same as in matrices.py
# with Matrix replaced with SMatrix
def copyin_list(self, key, value):
assert isinstance(value, (list, tuple))
self.copyin_matrix(key, SMatrix(value))
def multiply(self,b):
"""Returns self*b """
def dotprod(a,b,i,j):
assert a.cols == b.rows
r=0
for x in range(a.cols):
r+=a[i,x]*b[x,j]
return r
r = SMatrix(self.rows, b.cols, lambda i,j: dotprod(self,b,i,j))
if r.rows == 1 and r.cols ==1:
return r[0,0]
return r
def submatrix(self, keys):
assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
return SMatrix(rhi-rlo, chi-clo, lambda i,j: self[i+rlo, j+clo])
def reshape(self, _rows, _cols):
    """Return a new SMatrix with the same entries laid out _rows x _cols.

    Raises ValueError when the total number of elements differs.
    """
    if self.rows*self.cols != _rows*_cols:
        # Bug fix: the original merely printed a warning and then went
        # on to build a wrong-sized matrix; fail loudly instead.
        raise ValueError("Invalid reshape parameters %d %d" % (_rows, _cols))
    newD = {}
    for i in range(_rows):
        for j in range(_cols):
            # map the flat row-major position back into the old layout
            m, n = self.rowdecomp(i*_cols + j)
            # py3 fix: has_key() -> ``in``
            if (m, n) in self.mat:
                newD[(i, j)] = self.mat[(m, n)]
    return SMatrix(_rows, _cols, newD)
def cross(self, b):
    """
    Return the 3D cross product self x b as a 1 x 3 SMatrix.

    Both operands must be 3-component vectors: a 1 x 3 / 3 x 1 matrix,
    or (for b) a plain list/tuple of length 3.
    """
    assert isinstance(b, (list, tuple, Matrix))

    def _is_3vec(v):
        # accept 1x3/3x1 matrices and length-3 sequences
        if isinstance(v, Matrix):
            return (v.rows == 1 and v.cols == 3) or \
                   (v.rows == 3 and v.cols == 1)
        return len(v) == 3

    # Bug fix: same inverted condition as Matrix.cross — the original
    # raised only when self was malformed while b was well-formed, and
    # silently returned garbage when both shapes were wrong.
    if not (_is_3vec(self) and _is_3vec(b)):
        raise ValueError("Dimensions incorrect for cross product")
    return SMatrix(1, 3, ((self[1]*b[2] - self[2]*b[1]),
                          (self[2]*b[0] - self[0]*b[2]),
                          (self[0]*b[1] - self[1]*b[0])))
def zeronm(self,n,m):
warnings.warn( 'Deprecated: use zeros() instead.' )
return SMatrix(n,m,{})
def zero(self, n):
warnings.warn( 'Deprecated: use zeros() instead.' )
return SMatrix(n,n,{})
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return SMatrix(n,m,{})
def eye(self, n):
tmp = SMatrix(n,n,lambda i,j:0)
for i in range(tmp.rows):
tmp[i,i] = 1
return tmp
def list2numpy(l):
"""Converts python list of SymPy expressions to a NumPy array."""
from numpy import empty
a = empty(len(l), dtype=object)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m):
"""Converts SymPy's matrix to a NumPy array."""
from numpy import empty
a = empty(m.shape, dtype=object)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
def a2idx(a):
    """
    Tries to convert "a" to an index, returns None on failure.

    The result of a2idx() (if not None) can be safely used as an index to
    arrays/matrices.
    """
    # Prefer __int__ (matches the original lookup order), then fall
    # back to the __index__ protocol; anything else yields None.
    if hasattr(a, "__int__"):
        return int(a)
    try:
        return a.__index__()
    except AttributeError:
        return None
def symarray(prefix, shape):
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named prefix_i1_i2_... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as Sympy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
Examples
--------
>> from sympy import symarray
>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>> a = symarray('', 3)
>> b = symarray('', 3)
>> a[0] is b[0]
True
>> a = symarray('a', 3)
>> b = symarray('b', 3)
>> a[0] is b[0]
False
Creating symarrays with a prefix:
>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>> symarray('a', (2,3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>> symarray('a', (2,3,2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
"""
try:
import numpy as np
except ImportError:
raise ImportError("symarray requires numpy to be installed")
arr = np.empty(shape, dtype=object)
for index in np.ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))))
return arr
| {
"content_hash": "0b5c96daca62d840530d416f7ecbe38e",
"timestamp": "",
"source": "github",
"line_count": 2127,
"max_line_length": 100,
"avg_line_length": 31.698166431593794,
"alnum_prop": 0.48460443178784374,
"repo_name": "tovrstra/sympy",
"id": "14ada69963338d9d3b730557c7dcfd2b719d60a8",
"size": "67422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/matrices/matrices.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8282995"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
} |
import copy
import os
import sys
import time
import unittest
from app import generator, schedulerv2, sync
try:
import kvm_player
except ImportError:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import kvm_player
class TestKVMK8sEtcdOperator(kvm_player.KernelVirtualMachinePlayer):
    """Shared KVM test fixture for the etcd-operator scenarios.

    setUpClass provisions the lab environment once per concrete
    subclass; concrete tests (e.g. TestKVMK8SEtcdOperator0) inherit it.
    """
    @classmethod
    def setUpClass(cls):
        # Provision the shared services; the ordering below presumably
        # reflects startup dependencies (acserver/rack before the API,
        # matchbox before dnsmasq) — TODO confirm against the base class.
        cls.running_requirements()
        cls.set_acserver()
        cls.set_rack0()
        cls.set_api()
        cls.set_matchbox()
        cls.set_dnsmasq()
        # Block until setup/teardown synchronization completes so tests
        # only start against a ready environment.
        cls.pause(cls.wait_setup_teardown)
# @unittest.skip("skip")
class TestKVMK8SEtcdOperator0(TestKVMK8sEtcdOperator):
    """End-to-end test: boots 3 KVM nodes, schedules an etcd/k8s control
    plane on them, then resets each node in turn and re-checks cluster
    health (etcd members, vault unseal, kube-apiserver, node count)."""
    # @unittest.skip("just skip")
    def test_00(self):
        # Precondition: no interfaces discovered yet.
        self.assertEqual(self.fetch_discovery_interfaces(), [])
        nb_node = 3
        # Unique marker naming the matchbox profile/ignition and the VMs.
        marker = "euid-%s-%s" % (TestKVMK8sEtcdOperator.__name__.lower(), self.test_00.__name__)
        nodes = ["%s-%d" % (marker, i) for i in range(nb_node)]
        gen = generator.Generator(
            api_uri=self.api_uri,
            profile_id="%s" % marker,
            name="%s" % marker,
            ignition_id="%s.yaml" % marker,
            matchbox_path=self.test_matchbox_path
        )
        gen.dumps()
        sy = sync.ConfigSyncSchedules(
            api_uri=self.api_uri,
            matchbox_path=self.test_matchbox_path,
            ignition_dict={
                "etcd_member_kubernetes_control_plane": "%s-%s" % (marker, "k8s-control-plane")
            }
        )
        # Clean up any leftover domains from a previous run (errors ignored).
        for m in nodes:
            destroy, undefine = ["virsh", "destroy", m], \
                                ["virsh", "undefine", m]
            self.virsh(destroy, v=self.dev_null), self.virsh(undefine, v=self.dev_null)
        try:
            # Boot the virtual machines one by one.
            for i, m in enumerate(nodes):
                virt_install = self.create_virtual_machine(m, nb_node)
                self.virsh(virt_install, assertion=True, v=self.dev_null)
                time.sleep(self.testing_sleep_seconds)  # KVM fail to associate nic
            time.sleep(self.testing_sleep_seconds * self.testing_sleep_seconds)
            # Wait (up to 60 polls) for the scheduler to place the 3-member
            # control plane, then sync the matchbox configuration.
            sch_cp = schedulerv2.EtcdMemberKubernetesControlPlane(self.api_uri)
            sch_cp.expected_nb = 3
            for i in range(60):
                if sch_cp.apply() is True:
                    sy.apply()
                    break
                time.sleep(self.testing_sleep_seconds)
            self.assertTrue(sch_cp.apply())
            sy.apply()
            time.sleep(self.testing_sleep_seconds * self.testing_sleep_seconds)
            # Power-cycle the machines so they boot their scheduled roles.
            to_start = copy.deepcopy(nodes)
            self.kvm_restart_off_machines(to_start)
            time.sleep(self.testing_sleep_seconds * nb_node)
            # One extra iteration than nodes: the final pass re-validates the
            # cluster after the last reset.
            for i in range(nb_node + 1):
                self.etcd_member_len(sy.kubernetes_control_plane_ip_list[0], sch_cp.expected_nb,
                                     self.ec.vault_etcd_client_port, verify=False)
                self.etcd_endpoint_health(sy.kubernetes_control_plane_ip_list, self.ec.vault_etcd_client_port,
                                          verify=False)
                if i == 0:
                    # First pass only: bootstrap vault certificates and keys.
                    self.vault_self_certs(sy.kubernetes_control_plane_ip_list[0], self.ec.vault_etcd_client_port)
                    self.save_unseal_key(sy.kubernetes_control_plane_ip_list)
                    self.vault_verifing_issuing_ca(sy.kubernetes_control_plane_ip_list[0],
                                                   self.ec.vault_etcd_client_port)
                    self.vault_issue_app_certs(sy.kubernetes_control_plane_ip_list[0], self.ec.vault_etcd_client_port)
                self.unseal_all_vaults(sy.kubernetes_control_plane_ip_list, self.ec.vault_etcd_client_port)
                self.etcd_member_len(sy.kubernetes_control_plane_ip_list[0], sch_cp.expected_nb,
                                     self.ec.kubernetes_etcd_client_port, certs_name="etcd-kubernetes_client")
                self.etcd_member_len(sy.kubernetes_control_plane_ip_list[0], sch_cp.expected_nb,
                                     self.ec.fleet_etcd_client_port, certs_name="etcd-fleet_client")
                self.etcd_endpoint_health(sy.kubernetes_control_plane_ip_list, self.ec.kubernetes_etcd_client_port,
                                          certs_name="etcd-kubernetes_client")
                self.etcd_endpoint_health(sy.kubernetes_control_plane_ip_list + sy.kubernetes_nodes_ip_list,
                                          self.ec.fleet_etcd_client_port, certs_name="etcd-fleet_client")
                self.kube_apiserver_health(sy.kubernetes_control_plane_ip_list)
                self.kubernetes_node_nb(sy.kubernetes_control_plane_ip_list[0], nb_node)
                # Hard-reset node i and give the cluster time to recover.
                m = "%s-%d" % (marker, i)
                self.virsh(["virsh", "reset", m])
                time.sleep(self.testing_sleep_seconds * self.testing_sleep_seconds)
            self.write_ending(marker)
        finally:
            # With TEST set, drop into interactive mode before tearing down.
            if os.getenv("TEST"):
                self.iteractive_usage(api_server_uri="https://%s:6443" % sy.kubernetes_control_plane_ip_list[0])
            for i in range(nb_node):
                machine_marker = "%s-%d" % (marker, i)
                destroy, undefine = ["virsh", "destroy", "%s" % machine_marker], \
                                    ["virsh", "undefine", "%s" % machine_marker]
                self.virsh(destroy)
                self.virsh(undefine)
if __name__ == "__main__":
    # Run only the test named by $TEST when set; with defaultTest=None
    # (TEST unset) unittest discovers and runs everything in this module.
    unittest.main(defaultTest=os.getenv("TEST"))
| {
"content_hash": "b1dc8e6f78d80b33d282ddbdb396c2e0",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 118,
"avg_line_length": 43.864,
"alnum_prop": 0.5619186576691593,
"repo_name": "nyodas/enjoliver",
"id": "5ba61b5e86dd16d93f1414f329001f5672b5bdeb",
"size": "5483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tests/euid/k8s/test_kvm_k8s_etcd_operator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "Go",
"bytes": "32073"
},
{
"name": "HTML",
"bytes": "2485"
},
{
"name": "JavaScript",
"bytes": "2460"
},
{
"name": "Makefile",
"bytes": "41832"
},
{
"name": "Python",
"bytes": "462357"
},
{
"name": "Roff",
"bytes": "1036"
},
{
"name": "Shell",
"bytes": "52881"
}
],
"symlink_target": ""
} |
from .resource import Resource
class SharedAccessAuthorizationRuleResource(Resource):
    """Description of a namespace authorization rule.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param rights: The rights associated with the rule.
    :type rights: list of str or :class:`AccessRights
     <azure.mgmt.eventhub.models.AccessRights>`
    """

    # msrest validation rules: server-populated fields are read-only;
    # location and rights are required on create/update.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'rights': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest
    # (de)serialization types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'rights': {'key': 'properties.rights', 'type': '[AccessRights]'},
    }

    def __init__(self, location, rights, tags=None):
        """Initialize the rule with its location, access rights and tags."""
        super(SharedAccessAuthorizationRuleResource, self).__init__(location=location, tags=tags)
        self.rights = rights
| {
"content_hash": "209d243fcc9b4247a251b80c36ef23e1",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 32.18181818181818,
"alnum_prop": 0.5918079096045198,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "d4515ef0e7c72866167516701aaf9d781718bdb0",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-eventhub/azure/mgmt/eventhub/models/shared_access_authorization_rule_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
} |
from subprocess import call
import re
from common import *
class _AnyNumber(int):
    """Integer sentinel that compares equal to *anything*.

    Used as a wildcard expected count: ``assertMatches(ANY, ...)`` accepts
    whatever number of paths is found.  ``__ne__`` and ``__hash__`` are
    defined explicitly so the type stays self-consistent: defining
    ``__eq__`` alone leaves ``!=`` using ``int``'s comparison (Python 2
    does not derive ``__ne__`` from ``__eq__``) and makes the class
    unhashable on Python 3.
    """

    def __eq__(self, other):
        # Match any value.
        return True

    def __ne__(self, other):
        # BUG FIX: keep ``!=`` consistent with ``__eq__`` -- never unequal.
        return False

    # BUG FIX: restore hashability (Python 3 sets __hash__ = None when
    # __eq__ is overridden without it).
    __hash__ = int.__hash__


# Module-level wildcard instance used by the path assertions below.
ANY = _AnyNumber(1)
class PathTester(object):
    """Walks a directory tree and lets a test assert, pattern by pattern,
    that every path created on disk was expected.

    Paths are consumed as they match; assertMatchedAll() (also run on
    context-manager exit) fails if any unexplained path remains.
    """

    def __init__(self, test, root):
        # The unittest.TestCase used for assertions, and the tree root.
        self.test = test
        self.root = root
        self.paths = []        # unmatched paths seen so far
        self.matched = set()   # paths already claimed by an assertion
        self.scan()

    def scan(self):
        """Pick up paths created since the last scan (new, unmatched only)."""
        paths = []
        for dir_name, dir_names, file_names in os.walk(self.root):
            for name in dir_names:
                # Directories are recorded with a trailing slash.
                path = os.path.join(dir_name, name)[len(self.root):] + '/'
                paths.append(path)
            for name in file_names:
                path = os.path.join(dir_name, name)[len(self.root):]
                paths.append(path)
        # Ignore OS/byproduct noise: AppleDouble files, .DS_Store, *.pyc.
        for pattern in (r'\._', r'\.DS_Store$', r'.*\.pyc$'):
            paths = [path for path in paths if not re.match(pattern, path)]
        paths = [x for x in paths if x not in self.matched]
        self.paths.extend(paths)
        print 'PathTester found', len(paths), 'new items'
        # if paths:
        #     print '\n'.join('\t' + x for x in sorted(paths))

    def __enter__(self):
        # Entering the context re-scans so the block sees fresh paths.
        self.scan()

    def __exit__(self, *args):
        # Leaving the context asserts nothing was left unexplained.
        self.assertMatchedAll()

    def assertMatches(self, count, pattern, mode=None, msg=None):
        """Claim exactly *count* unmatched paths matching *pattern*.

        Relative patterns are anchored to match at any depth; matched
        entries also have their permission bits checked (0777 for dirs,
        0666 for files unless *mode* is given).
        """
        if not pattern:
            # NOTE(review): PathTester defines no fail(); this likely should
            # be self.test.fail() -- confirm before relying on this branch.
            self.fail('no pattern specified')
        full_pattern = pattern + r'$'
        if full_pattern[0] != '/':
            # Allow the pattern to match at any directory depth.
            full_pattern = r'(?:/[^/]*)*?/' + full_pattern
        paths = self.paths
        self.paths = []
        for path in paths:
            if re.match(full_pattern, path):
                if mode is None:
                    test_mode = 0777 if path.endswith('/') else 0666
                else:
                    test_mode = mode
                stat = os.stat(os.path.join(self.root, path.strip('/')))
                self.test.assertEqual(stat.st_mode & 0777, test_mode, 'permissions differ on %r; %o != %o' % (path, stat.st_mode & 0777, test_mode))
                self.matched.add(path)
            else:
                # Keep non-matching paths for later assertions.
                self.paths.append(path)
        self.test.assertEqual(
            count,
            len(paths) - len(self.paths),
            msg or ('found %d, expected %d via %r; %d remain:\n\t' % (len(paths) - len(self.paths), count, pattern, len(self.paths))) + '\n\t'.join(sorted(self.paths))
        )

    def assertMatchedAll(self, msg=None):
        """Fail if any scanned path was never claimed by an assertion."""
        self.test.assertFalse(self.paths, msg or ('%d paths remain:\n\t' % len(self.paths)) + '\n\t'.join(sorted(self.paths)))

    # --- Canned assertions describing the expected SGFS schema layout. ---

    def assertProject(self):
        self.assertMatches(1, r'/Assets/')
        self.assertMatches(1, r'/SEQ/')
        self.assertMatches(1, r'/\.sgfs/')
        self.assertMatches(1, r'/\.sgfs/cache\.sqlite')
        self.assertMatches(1, r'/\.sgfs\.yml')

    def assertAssetType(self, count):
        self.assertMatches(count, r'/Assets/(Character|Vehicle)/')

    def assertAsset(self, count):
        self.assertMatches(count, r'/Assets/(Character|Vehicle)/(\1_\d+)/')
        self.assertMatches(count, r'/Assets/(Character|Vehicle)/(\1_\d+)/\.sgfs\.yml')

    def assertAssetTask(self, count, type_, **kwargs):
        # NOTE(review): extra kwargs are forwarded but _assertTask accepts
        # none -- passing any would raise TypeError; confirm intent.
        self._assertTask(count, r'/Assets/(Character|Vehicle)/(\1_\d+)', type_, **kwargs)

    def _assertTask(self, count, base, type_):
        """Assert the per-step (Anm/Comp/Light/Model) directory skeleton."""
        self.assertMatches(count, base + r'/%s/' % type_)
        self.assertMatches(ANY, base + r'/%s/\.sgfs\.yml' % type_)
        self.assertMatches(ANY, base + r'/%s/dailies/' % type_)
        if type_ in ('Anm', 'Model', 'Light'):
            self.assertMatches(count, base + r'/%s/maya/' % type_)
            self.assertMatches(count, base + r'/%s/maya/images/' % type_)
            self.assertMatches(count, base + r'/%s/maya/published/' % type_)
            self.assertMatches(count, base + r'/%s/maya/scenes/' % type_)
            self.assertMatches(count, base + r'/%s/maya/sourceimages/' % type_)
            self.assertMatches(count, base + r'/%s/maya/workspace.mel' % type_)
        if type_ in ('Comp', 'Light'):
            self.assertMatches(count, base + r'/%s/nuke/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/published/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/renders/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/scripts/' % type_)
        if type_ in ('Comp', ):
            self.assertMatches(count, base + r'/%s/nuke/renders/cleanplates/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/renders/elements/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/renders/mattes/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/scripts/comp/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/cleanplate/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/elements/' % type_)
            self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/roto/' % type_)

    def assertSequence(self, count):
        self.assertMatches(count, r'/SEQ/(\w{2})/')
        self.assertMatches(count, r'/SEQ/(\w{2})/\.sgfs\.yml')

    def assertShot(self, count):
        self.assertMatches(count, r'/SEQ/(\w{2})/\1_\d{3}/')
        self.assertMatches(count, r'/SEQ/(\w{2})/\1_\d{3}/\.sgfs\.yml')
        self.assertMatches(count * 3, r'/SEQ/(\w{2})/\1_\d{3}/(Audio|Plates|Ref)/')

    def assertShotTask(self, count, type_, **kwargs):
        # NOTE(review): same kwargs caveat as assertAssetTask.
        self._assertTask(count, r'/SEQ/(\w{2})/\1_\d{3}', type_, **kwargs)

    def assertFullStructure(self):
        """Assert the complete tree for the fixture built in Base.setUp."""
        self.assertProject()
        self.assertAssetType(2)
        self.assertAsset(4)
        self.assertAssetTask(4, 'Anm')
        self.assertAssetTask(4, 'Comp')
        self.assertAssetTask(4, 'Light')
        self.assertAssetTask(4, 'Model')
        self.assertSequence(2)
        self.assertShot(4)
        self.assertShotTask(4, 'Anm')
        self.assertShotTask(4, 'Comp')
        self.assertShotTask(4, 'Light')
        self.assertShotTask(4, 'Model')
        self.assertMatchedAll()
class Base(TestCase):
    """Shared fixture: a Shotgun project with 2 sequences, 4 shots,
    4 assets, 4 pipeline steps and one task per step/entity pair (with the
    last step doubled), plus an SGFS instance rooted in the sandbox."""

    def setUp(self):
        sg = Shotgun()
        self.sg = self.fix = fix = Fixture(sg)
        self.proj_name = 'Test Project ' + mini_uuid()
        proj = fix.Project(self.proj_name)
        seqs = [proj.Sequence(code, project=proj) for code in ('AA', 'BB')]
        shots = [seq.Shot('%s_%03d' % (seq['code'], i), project=proj) for seq in seqs for i in range(1, 3)]
        steps = [fix.find_or_create('Step', code=code, short_name=code) for code in ('Anm', 'Comp', 'Light', 'Model')]
        assets = [proj.Asset(sg_asset_type=type_, code="%s %d" % (type_, i)) for type_ in ('Character', 'Vehicle') for i in range(1, 3)]
        # steps + steps[-1:] repeats the final step, yielding a duplicate
        # task per entity for the last step (exercised by the tag tests).
        tasks = [entity.Task(step['code'] + ' something', step=step, entity=entity, project=proj) for step in (steps + steps[-1:]) for entity in (shots + assets)]
        # Store minimal entity dicts for use by the tests.
        self.proj = minimal(proj)
        self.seqs = map(minimal, seqs)
        self.shots = map(minimal, shots)
        self.steps = map(minimal, steps)
        self.tasks = map(minimal, tasks)
        self.assets = map(minimal, assets)
        self.session = Session(self.sg)
        self.sgfs = SGFS(root=self.sandbox, session=self.session, schema_name='testing')
        # NOTE(review): rebinding the local ``self`` has no effect on the
        # instance; looks like leftover debugging -- confirm before removal.
        self = None

    def pathTester(self):
        """Return a PathTester rooted at this test's project directory."""
        return PathTester(self, os.path.join(self.sandbox, self.proj_name.replace(' ', '_')))

    def create(self, entities, *args, **kwargs):
        """Shorthand for sgfs.create_structure on the given entities."""
        self.sgfs.create_structure(entities, *args, **kwargs)
class TestFullStructure(Base):
    """Creates every task and asset in a single call and verifies the
    entire expected directory tree appears on disk."""

    def test_full_structure(self):
        entities = self.tasks + self.assets
        self.create(entities, allow_project=True)
        self.pathTester().assertFullStructure()
class TestIncrementalStructure(Base):
    """Creates structure one entity at a time, checking after each step
    that only the newly expected paths appeared, then verifies tag counts."""

    def test_incremental_structure(self):
        paths = self.pathTester()
        proj = self.session.merge(self.proj)
        proj.fetch('name')
        self.create([proj], allow_project=True)
        with paths:
            paths.assertProject()
        # One sequence at a time: each adds exactly one sequence dir.
        for seq in self.seqs:
            self.create([seq])
            with paths:
                paths.assertSequence(1)
        # Assets: the type folder may or may not be new, hence ANY.
        for asset in self.assets:
            self.create([asset])
            with paths:
                paths.assertAssetType(ANY)
                paths.assertAsset(1)
        for shot in self.shots:
            self.create([shot])
            with paths:
                paths.assertShot(1)
        # All tasks at once: one task tree per step per entity.
        self.create(self.tasks)
        with paths:
            paths.assertAssetTask(len(self.assets), 'Anm')
            paths.assertAssetTask(len(self.assets), 'Comp')
            paths.assertAssetTask(len(self.assets), 'Light')
            paths.assertAssetTask(len(self.assets), 'Model')
            paths.assertShotTask(len(self.shots), 'Anm')
            paths.assertShotTask(len(self.shots), 'Comp')
            paths.assertShotTask(len(self.shots), 'Light')
            paths.assertShotTask(len(self.shots), 'Model')
        root = os.path.join(self.sandbox, self.proj_name.replace(' ', '_'))
        self.assertEqual(1, len(self.sgfs.get_directory_entity_tags(root)))
        self.assertEqual(1, len(self.sgfs.get_directory_entity_tags(root + '/SEQ/AA/AA_001/Anm')))
        # The duplicated final step produces two tags on the Model dir.
        self.assertEqual(2, len(self.sgfs.get_directory_entity_tags(root + '/SEQ/AA/AA_001/Model')))
class TestMutatedStructure(Base):
    """Renames sequence directories on disk after creation, rebuilds the
    SGFS cache, and checks new shots land in the renamed locations with
    the correct entity tags."""

    def test_mutated_structure(self):
        root = os.path.join(self.sandbox, self.proj_name.replace(' ', '_'))
        paths = self.pathTester()
        proj = self.session.merge(self.proj)
        proj.fetch('name')
        self.create([proj], allow_project=True)
        with paths:
            paths.assertProject()
        for seq in self.seqs:
            self.create([seq])
            with paths:
                paths.assertSequence(1)
        # Mutate the sequences, and rebuild the cache.
        call(['mv', root + '/SEQ/AA', root + '/SEQ/XX'])
        call(['mv', root + '/SEQ/BB', root + '/SEQ_BB'])
        print '==== MUTATION ===='
        print 'Rebuilding cache...'
        self.sgfs.rebuild_cache(root, recurse=True)
        print 'Recreating structure...'
        self.create(self.shots)
        print 'Scanning for changes...'
        paths.scan()
        # Shots must be created under the *renamed* sequence directories.
        paths.assertMatches(2, r'SEQ/XX/AA_\d+/')
        paths.assertMatches(2, r'SEQ/XX/AA_\d+/\.sgfs\.yml')
        paths.assertMatches(2, r'SEQ_BB/BB_\d+/')
        paths.assertMatches(2, r'SEQ_BB/BB_\d+/\.sgfs\.yml')
        # Tags in the moved locations still resolve to the right shots.
        tags = self.sgfs.get_directory_entity_tags(root + '/SEQ/XX/AA_001')
        self.assertEqual(1, len(tags))
        self.assertSameEntity(tags[0]['entity'], self.shots[0])
        tags = self.sgfs.get_directory_entity_tags(root + '/SEQ_BB/BB_001')
        self.assertEqual(1, len(tags))
        self.assertSameEntity(tags[0]['entity'], self.shots[3])
class TestDryRun(Base):
    """A dry-run create_structure must leave the sandbox untouched."""

    def test_dry_run(self):
        entities = self.tasks + self.assets
        self.create(entities, dry_run=True)
        self.pathTester().assertMatchedAll()
class TestDisallowProject(TestCase):
    """create_structure on a project entity must raise ValueError when
    project creation is not explicitly allowed."""

    def setUp(self):
        sg = Shotgun()
        self.sg = self.fix = fix = Fixture(sg)
        self.proj_name = 'Test Project ' + mini_uuid()
        self.proj = fix.Project(self.proj_name)
        self.sgfs = SGFS(root=self.sandbox, shotgun=fix, schema_name='testing')

    def tearDown(self):
        # Remove all fixture entities created by this test.
        self.fix.delete_all()

    def test_disallow_project(self):
        os.makedirs(os.path.join(self.sandbox, self.proj_name.replace(' ', '_')))
        # No allow_project=True here, so this must be rejected.
        self.assertRaises(ValueError, self.sgfs.create_structure, [self.proj])
| {
"content_hash": "af9fea82946beb9193cf3bf5147dbb60",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 167,
"avg_line_length": 38.242811501597444,
"alnum_prop": 0.5579782790309106,
"repo_name": "westernx/sgfs",
"id": "e26d0f51a35487a331e194caf778e1eb831c5bc4",
"size": "11970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_structure.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "236515"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
} |
import six
from trefoil.render.renderers.classified import ClassifiedRenderer
from trefoil.render.renderers.stretched import StretchedRenderer
from trefoil.render.renderers.unique import UniqueValuesRenderer
from trefoil.utilities.color import Color
# Registry mapping renderer type names to their implementing classes.
AVAILABLE_RENDERERS = {
    "classified": ClassifiedRenderer,
    "stretched": StretchedRenderer,
    "unique": UniqueValuesRenderer
}
def get_renderer_by_name(name):
    """Return the renderer class registered under ``name``.

    Raises KeyError for names not in AVAILABLE_RENDERERS.
    """
    return AVAILABLE_RENDERERS[name]
def get_renderer_name(renderer):
    """Return the registry name ("classified", "stretched" or "unique")
    for the given renderer instance.

    Raises ValueError when the instance matches none of the known
    renderer classes.
    """
    # Checked in the same order as the original if/elif chain so any
    # subclass relationships resolve identically.
    known_types = (
        (ClassifiedRenderer, "classified"),
        (StretchedRenderer, "stretched"),
        (UniqueValuesRenderer, "unique"),
    )
    for renderer_class, type_name in known_types:
        if isinstance(renderer, renderer_class):
            return type_name
    raise ValueError("Could not find name for renderer: %s" % renderer)
def renderer_from_dict(renderer_dict):
"""Returns a renderer object from a dictionary object"""
options = renderer_dict.get('options', {})
try:
renderer_type = renderer_dict['type']
renderer_colors = [(float(x[0]), Color.from_hex(x[1])) for x in renderer_dict['colors']]
fill_value = options.get('fill_value')
if fill_value is not None:
fill_value = float(fill_value)
except KeyError:
raise ValueError("Missing required keys from renderer renderer_dicturation")
renderer_kwargs = {
'colormap': renderer_colors,
'fill_value': fill_value,
'background_color': Color(255, 255, 255, 0)
}
if renderer_type == "stretched":
color_space = options.get('color_space', 'hsv').lower().strip()
if not color_space in ('rgb', 'hsv'):
raise ValueError("Invalid color space: {}".format(color_space))
renderer = StretchedRenderer(colorspace=color_space, **renderer_kwargs)
elif renderer_type == "classified":
renderer = ClassifiedRenderer(**renderer_kwargs)
elif renderer_type == "unique":
try:
labels = [six.text_type(x) for x in options.get('labels', [])]
except TypeError:
raise ValueError("Labels option must be an array")
renderer = UniqueValuesRenderer(labels=labels, **renderer_kwargs)
return renderer | {
"content_hash": "2b354257db4023f7bb55dfd647640f92",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 96,
"avg_line_length": 34.56060606060606,
"alnum_prop": 0.6536606751424814,
"repo_name": "consbio/clover",
"id": "f5d58a3ccb43e462a7431cf59c8b7c7d7c0c97e5",
"size": "2281",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/pillow-8.2.0",
"path": "trefoil/render/renderers/utilities.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8307"
},
{
"name": "Python",
"bytes": "227968"
}
],
"symlink_target": ""
} |
from django.db import models
from django.db.models import Count, Sum
from django.contrib.auth.models import AbstractUser
from sgce.models import Enrollment, Event, Payment
class Person(AbstractUser):
    """Custom user model covering participants, operators and
    receptionists, with enrollment/payment reporting helpers."""

    # Sex choice constants.
    MALE = 'M'
    FEMALE = 'F'
    SEX_CHOICES = (
        (MALE, 'M'),
        (FEMALE, 'F'),
    )
    # Profile-type ("stype") constants.
    PARTICIPANT = 'P'
    OPERATOR = 'O'
    RECEPTIONIST = 'R'
    STYPE_CHOICES = (
        (PARTICIPANT, 'Participante'),
        (OPERATOR, 'Operador'),
        (RECEPTIONIST, 'Recepcionista'),
    )
    name = models.CharField(verbose_name='Nome', max_length=30)
    # NOTE(review): CPF stored as a plain 11-char string; no uniqueness or
    # digit-format validation is enforced at the model level -- confirm.
    cpf = models.CharField(verbose_name='CPF', max_length=11)
    nationality = models.CharField(verbose_name='Nacionalidade', max_length=64)
    sex = models.CharField(verbose_name='Sexo', max_length=1, choices=SEX_CHOICES, default=MALE)
    scholarity = models.CharField(verbose_name='Escolaridade', max_length=32)
    birth = models.DateField(verbose_name='Data de nascimento')
    stype = models.CharField(max_length=1, choices=STYPE_CHOICES, default=PARTICIPANT)
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['name', 'email', 'cpf', 'nationality', 'sex', 'scholarity', 'birth']

    class Meta:
        verbose_name = 'Pessoa'
        verbose_name_plural = 'Pessoas'

    def __unicode__(self):
        # Python 2 string representation.
        return self.username

    def nof_events_enrolled(self):
        """Number of distinct events this person has enrollments in."""
        enrollments = Enrollment.objects.filter(person=self)
        return Event.objects.filter(enrollments__in=enrollments).distinct().count()

    def nof_activities_enrolled(self):
        """Total count of distinct activities across all enrollments."""
        act = 0
        for enrollment in Enrollment.objects.filter(person=self).annotate(act_count=Count('activities__id', distinct=True)):
            act += enrollment.act_count
        return act

    def nof_enrollments(self):
        """Number of enrollments belonging to this person."""
        return Enrollment.objects.filter(person=self).count()

    def money_spent(self):
        """Sum of payment prices over this person's enrollments.

        NOTE(review): Sum aggregates yield None (not 0) when there are no
        matching rows -- callers must handle None.
        """
        return Payment.objects.filter(enrollment__person=self).aggregate(Sum('price'))['price__sum']

    def points_earned(self):
        """Sum of points over this person's enrollments (None if none)."""
        return Enrollment.objects.filter(person=self).aggregate(Sum('points'))['points__sum']
| {
"content_hash": "988707f9b155a24c5f0e4b98bb191a5b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 118,
"avg_line_length": 31.7,
"alnum_prop": 0.7271293375394322,
"repo_name": "ramonsaraiva/sgce",
"id": "8820c9968c2e8aab0bb693e31be85e339ba1e9eb",
"size": "1902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgce/person/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "191911"
},
{
"name": "JavaScript",
"bytes": "836285"
},
{
"name": "PHP",
"bytes": "61297"
},
{
"name": "Python",
"bytes": "67290"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
} |
import sys, os
from opentreetesting import test_http_json_method, config
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/phylesystem/external_url/pg_99'
print SUBMIT_URI
r = test_http_json_method(SUBMIT_URI,
'GET',
expected_status=200,
return_bool_data=True)
if not r[0]:
sys.exit(1)
print r[1]
| {
"content_hash": "8064f18b3ad517c68a6e5a8cc18075a2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.5824742268041238,
"repo_name": "OpenTreeOfLife/phylesystem-api",
"id": "ab23a9256a7537b5fd8b80853bd0336c86dce867",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ws-tests/test_external_url.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "99"
},
{
"name": "Python",
"bytes": "207102"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
from __future__ import division
# Only py3 string encoding
from __future__ import unicode_literals
# Only py3 print
from __future__ import print_function
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils import check_random_state
import hearthstone_utils as hu
if __name__ == "__main__":
    # Train and persist the classifiers (True presumably enables saving --
    # see hearthstone_utils.trainClassifiersAndSave).  Dropped the
    # un-Pythonic trailing semicolon.
    hu.trainClassifiersAndSave(True)
| {
"content_hash": "aa3f3a8ba90a545652cf46b40faf069f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 25.9375,
"alnum_prop": 0.7686746987951807,
"repo_name": "dtaralla/hearthstone",
"id": "13e4c68d09392e811aa7f1a85a7ac8e14cd8937b",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/trainAndScore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3460"
},
{
"name": "C",
"bytes": "964"
},
{
"name": "C++",
"bytes": "445594"
},
{
"name": "Python",
"bytes": "38668"
},
{
"name": "QMake",
"bytes": "6638"
}
],
"symlink_target": ""
} |
"""Common function to deal with virtual environments"""
from __future__ import absolute_import
from jupyter_client.kernelspec import KernelSpec
from traitlets import default
# Unique sentinel meaning "env not loaded yet"; distinguishes the unloaded
# state from a legitimately empty environment.
_nothing = object()
class EnvironmentLoadingKernelSpec(KernelSpec):
    """A KernelSpec which loads `env` by activating the virtual environment.

    The environment dict may be expensive to compute, so it is resolved
    lazily on first access of ``env`` and cached afterwards.
    """
    _loader = None
    # Class-level sentinel: env has not been loaded yet.
    _env = _nothing

    @property
    def env(self):
        """Environment variables for the kernel, loaded on first access."""
        if self._env is _nothing:
            if self._loader:
                try:
                    self._env = self._loader()
                except Exception:
                    # BUG FIX: was a bare ``except:``, which also swallowed
                    # KeyboardInterrupt/SystemExit.  Fall back to an empty
                    # environment when activation fails.
                    self._env = {}
            else:
                # BUG FIX: without a loader the sentinel object used to
                # leak out of the property; return an empty env instead.
                self._env = {}
        return self._env

    def __init__(self, loader, **kwargs):
        """Store the env *loader* callable; other kwargs go to KernelSpec."""
        self._loader = loader
        super(EnvironmentLoadingKernelSpec, self).__init__(**kwargs)

    def to_dict(self):
        """Serialize the spec -- deliberately WITHOUT ``env`` so that
        serialization never triggers the expensive environment load."""
        d = dict(argv=self.argv,
                 # Do not trigger the loading
                 #env=self.env,
                 display_name=self.display_name,
                 language=self.language,
                 metadata=self.metadata,
                 )
        return d
| {
"content_hash": "c92720e06c20310c6842e38537c571a3",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 26.475,
"alnum_prop": 0.5571293673276676,
"repo_name": "Cadair/jupyter_environment_kernels",
"id": "a153c16d1a3ea3c858f3e6a7facd3efa0a0e3605",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "environment_kernels/env_kernelspec.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "45900"
}
],
"symlink_target": ""
} |
"""
Flip API
Flip # noqa: E501
The version of the OpenAPI document: 3.1
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
import urllib3
from telestream_cloud_flip.exceptions import ApiException, ApiValueError
# Module-level logger for this REST client.
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    """Thin wrapper exposing an urllib3 response's status, reason, body
    and headers through a file-like interface."""

    def __init__(self, resp):
        """Capture the raw urllib3 response and mirror its key fields."""
        self.urllib3_response = resp
        self.data = resp.data
        self.status = resp.status
        self.reason = resp.reason

    def getheaders(self):
        """Returns a dictionary of the response headers."""
        headers = self.urllib3_response.getheaders()
        return headers

    def getheader(self, name, default=None):
        """Returns a given response header (or *default* when absent)."""
        underlying = self.urllib3_response
        return underlying.getheader(name, default)
class RESTClientObject(object):
    """Synchronous HTTP client built on urllib3 connection pools.

    Configures TLS verification, proxying and pool sizes from the given
    configuration object, and exposes one helper per HTTP verb that
    delegates to :meth:`request`.
    """

    def __init__(self, configuration, pools_size=4, maxsize=None):
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501

        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE

        # ca_certs
        if configuration.ssl_ca_cert:
            ca_certs = configuration.ssl_ca_cert
        else:
            # if not set certificate file, use Mozilla's root certificates.
            ca_certs = certifi.where()

        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501

        if configuration.retries is not None:
            addition_pool_args['retries'] = configuration.retries

        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4

        # https pool manager
        if configuration.proxy:
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                proxy_headers=configuration.proxy_headers,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )

    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None, _preload_content=True,
                _request_timeout=None):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']

        if post_params and body:
            raise ApiValueError(
                "body parameter cannot be used with post_params parameter."
            )

        post_params = post_params or {}
        headers = headers or {}

        timeout = None
        if _request_timeout:
            # A single number means total timeout; a 2-tuple means
            # (connect, read) timeouts.
            if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif (isinstance(_request_timeout, tuple) and
                  len(_request_timeout) == 2):
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1])

        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'

        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                if query_params:
                    url += '?' + urlencode(query_params)
                if re.search('json', headers['Content-Type'], re.IGNORECASE):
                    request_body = None
                    if body is not None:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields=query_params,
                                              preload_content=_preload_content,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            # Surface TLS failures as ApiException with status 0.
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)

        if _preload_content:
            r = RESTResponse(r)

            # In the python 3, the response.data is bytes.
            # we need to decode it to string.
            if six.PY3:
                r.data = r.data.decode('utf8')

            # log response body
            logger.debug("response body: %s", r.data)

        if not 200 <= r.status <= 299:
            raise ApiException(http_resp=r)

        return r

    def GET(self, url, headers=None, query_params=None, _preload_content=True,
            _request_timeout=None):
        """Issue a GET request; see :meth:`request` for parameters."""
        return self.request("GET", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)

    def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
             _request_timeout=None):
        """Issue a HEAD request; see :meth:`request` for parameters."""
        return self.request("HEAD", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)

    def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
                body=None, _preload_content=True, _request_timeout=None):
        """Issue an OPTIONS request; see :meth:`request` for parameters."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def DELETE(self, url, headers=None, query_params=None, body=None,
               _preload_content=True, _request_timeout=None):
        """Issue a DELETE request; see :meth:`request` for parameters."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def POST(self, url, headers=None, query_params=None, post_params=None,
             body=None, _preload_content=True, _request_timeout=None):
        """Issue a POST request; see :meth:`request` for parameters."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def PUT(self, url, headers=None, query_params=None, post_params=None,
            body=None, _preload_content=True, _request_timeout=None):
        """Issue a PUT request; see :meth:`request` for parameters."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def PATCH(self, url, headers=None, query_params=None, post_params=None,
              body=None, _preload_content=True, _request_timeout=None):
        """Issue a PATCH request; see :meth:`request` for parameters."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
| {
"content_hash": "574b87175a9d28ae8eb12b7bc9a595f5",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 134,
"avg_line_length": 42.10508474576271,
"alnum_prop": 0.5212945817567024,
"repo_name": "Telestream/telestream-cloud-python-sdk",
"id": "e49cce636277af0be4ec0b67cc85c9c7897a5f0b",
"size": "12438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telestream_cloud_flip_sdk/telestream_cloud_flip/rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1339719"
},
{
"name": "Shell",
"bytes": "6712"
}
],
"symlink_target": ""
} |
import sys
import os
brief = "test all controllers"
def usage(argv0):
    """Print the command usage line and terminate with exit status 1."""
    message = "Usage: {} test controllers [--list|--help]".format(argv0)
    print(message)
    sys.exit(1)
aliases = ['cs']
def execute(argv, argv0, engine):
    """Run the test module of every controller, one subprocess each.

    ``argv``/``argv0`` are the CLI arguments and program name; ``engine`` is
    accepted for command-interface compatibility but unused here.
    """
    if '--help' in argv or '-h' in argv:
        usage(argv0)

    # Force the test environment before the settings module is imported.
    os.environ["AIOWEB_ENV"] = "test"
    os.environ.setdefault("AIOWEB_SETTINGS_MODULE", "settings")

    import lib, inflection
    from aioweb import settings

    tests_dir = lib.dirs(settings, format=["tests_controllers"], check=True)
    if not tests_dir:
        print("No controller found!")
        sys.exit(0)

    controllers = [m[:-3] for m in os.listdir(tests_dir)
                   if m.endswith(".py") and not m.startswith("__")]

    if '--list' in argv:
        for ctrl in controllers:
            print(ctrl)
        sys.exit(0)

    # rc must persist across iterations so that answering 'a' ("all")
    # suppresses further prompts; it was previously reset inside the loop,
    # which made the 'a' answer a no-op (bug).
    rc = None
    for ctrl in controllers:
        print(ctrl)
        test_file = os.path.join(tests_dir, ctrl + ".py")
        print("testing " + ctrl)
        # NOTE(review): os.system with an interpolated path is shell-injection
        # prone if test file names ever contain shell metacharacters.
        if os.system("python3 " + test_file) != 0 and rc != 'a':
            rc = lib.ask("Do You wanna continue?", ['y', 'n', 'a'])
            if rc == 'n': sys.exit(1)
| {
"content_hash": "0a1d219d8768b1e78162e75d3606a7d1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 105,
"avg_line_length": 27.17391304347826,
"alnum_prop": 0.5768,
"repo_name": "kreopt/aioweb",
"id": "4f4dbfa51e6d9d9d0bb74455b66988b04da76d31",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wyrm/modules/test/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6410"
},
{
"name": "JavaScript",
"bytes": "16197"
},
{
"name": "Python",
"bytes": "189868"
}
],
"symlink_target": ""
} |
from unittest import TestCase, mock
from webspider.utils.http_tools import generate_http_request_headers, requests_get, requests_post
class TestUtilHttpTools(TestCase):
    """Unit tests for the HTTP helpers in webspider.utils.http_tools."""

    def test_generate_http_request_headers(self):
        headers = generate_http_request_headers()
        self.assertIsInstance(headers, dict)
        headers = generate_http_request_headers(referer='https://www.zhihu.com')
        self.assertEqual(headers['Referer'], 'https://www.zhihu.com')

    @mock.patch('requests.get')
    def test_request_get(self, mock_get):
        # Stub out the network call; requests_get should return its value.
        mock_get.return_value = '200'
        self.assertEqual(requests_get(url='https://baidu.com', need_sleep=False), '200')

    @mock.patch('requests.post')
    def test_request_post(self, mock_post):
        # Stub out the network call; requests_post should return its value.
        mock_post.return_value = '200'
        self.assertEqual(requests_post(url='https://baidu.com', need_sleep=False), '200')
| {
"content_hash": "58e7972e0323e916a1bdab08fedd5c0f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 97,
"avg_line_length": 39,
"alnum_prop": 0.6805555555555556,
"repo_name": "GuozhuHe/webspider",
"id": "8f4f84be127d25127abb41614f4461824dd3edb4",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils/test_http_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "2509425"
},
{
"name": "TSQL",
"bytes": "9772"
}
],
"symlink_target": ""
} |
"""Tests for layout.
Includes positioning and dimensioning of boxes, line breaks, page breaks.
"""
| {
"content_hash": "f65cbfd4ea32f701b67b32b25b6d094e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 73,
"avg_line_length": 20.2,
"alnum_prop": 0.7425742574257426,
"repo_name": "Kozea/WeasyPrint",
"id": "04c2657b1ede1a7f0e5ae130ac2e51a4c5be61bf",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/layout/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48998"
},
{
"name": "HTML",
"bytes": "18932"
},
{
"name": "Python",
"bytes": "1915976"
}
],
"symlink_target": ""
} |
import os
import random
import sys
import time
import unittest

from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
import streamsx.topology.context
import streamsx.spl.op as op
import streamsx.spl.toolkit
def random_topic(prefix='RT_'):
    """Return a random topic name: *prefix* followed by 16 random letters.

    A fresh topic per test keeps concurrently running jobs from
    subscribing to each other's streams.
    """
    # `random` is now imported explicitly at module level; previously the
    # name only resolved because it leaked through a star import.
    return prefix + ''.join(random.choices('ABCDEFGIHIJLK', k=16))
SCHEMA = StreamSchema('tuple<uint64 seq>')
def slowme(t):
    """Pass every tuple through, pausing briefly on every 20th sequence.

    Used as a stream filter to slow the subscriber down so buffered
    connections can fill up.  Always returns True (nothing is filtered).
    """
    if t['seq'] % 20 == 0:
        time.sleep(0.2)
    return True
def check_lt_87(t):
    """Return True when the tuple's sequence number is below 87."""
    seq = t['seq']
    return seq < 87
class TestPubSub(unittest.TestCase):
    """Test basic publish/subscribe between SPL jobs on a distributed instance.

    Each test builds one topology containing both the publisher(s) and the
    subscriber, submits it via the Tester, and (for most tests) polls the
    subscriber's input-port metrics to verify drop/processed counts.
    """
    def setUp(self):
        # Run every test against a distributed Streams instance.
        Tester.setup_distributed(self)

    def _publish(self, topo, N, topic, width=None, allow_filter=False):
        """Add a Beacon source that publishes N sequenced tuples to `topic`.

        width: when set, publish from a parallel region of that width (UDP).
        allow_filter: when True, let subscribers push their filter upstream.
        """
        b = op.Source(topo, "spl.utility::Beacon",
                      SCHEMA,
                      params = {'initDelay':10.0, 'iterations':N})
        b.seq = b.output('IterationCount()')
        ps = b.stream
        if width:
            ps = ps.parallel(width=width)
        p = op.Sink('com.ibm.streamsx.topology.topic::Publish',
                    ps,
                    params = {'topic': topic},
                    name='MSP')
        if allow_filter:
            p.params['allowFilter'] = True

    def _subscribe(self, topo, topic, direct=True, drop=None, filtered=False, extra=None):
        """Add a subscriber for `topic` and return its stream.

        direct=False selects a buffered connection; drop (used with the
        buffered case) sets the buffer-full policy and additionally slows the
        subscriber with `slowme` so the buffer actually overflows.
        """
        s = op.Source(topo,
                      "com.ibm.streamsx.topology.topic::FilteredSubscribe" if filtered else "com.ibm.streamsx.topology.topic::Subscribe",
                      SCHEMA,
                      params = {'topic':topic, 'streamType':SCHEMA},
                      name='MSS')
        if extra:
            s.params.update(extra)
        if not direct:
            s.params['connect'] = op.Expression.expression('com.ibm.streamsx.topology.topic::Buffered')
        if drop:
            s.params['bufferFullPolicy'] = op.Expression.expression('Sys.' + drop)
            return s.stream.filter(slowme)
        return s.stream

    def _get_single_sub_op(self):
        """Return the Filter operator inside the single MSS subscribe composite.

        Its input-port metrics carry the dropped/processed tuple counts.
        """
        job = self.tester.submission_result.job
        self.assertEqual('healthy', job.health)
        # NOTE: the loop variable deliberately shadows the module-level
        # `op` import within this method.
        for op in job.get_operators():
            if op.name.startswith('MSS') and op.operatorKind == 'spl.relational::Filter':
                mss = op
        return mss

    def _get_single_sub_metrics(self, mss):
        """Poll until both nTuplesDropped and nTuplesProcessed metrics exist."""
        nDropped = None
        nProcessed = None
        ip = mss.get_input_ports()[0]
        while nDropped is None or nProcessed is None:
            if nDropped is None:
                metrics = ip.get_metrics(name='nTuplesDropped')
                if metrics:
                    nDropped = metrics[0]
            if nProcessed is None:
                metrics = ip.get_metrics(name='nTuplesProcessed')
                if metrics:
                    nProcessed = metrics[0]
        return nDropped, nProcessed

    def check_single_sub(self):
        """
        Check we get all the tuples with none dropped
        with a single subscriber.
        """
        mss = self._get_single_sub_op()
        nDropped, nProcessed = self._get_single_sub_metrics(mss)
        while nProcessed.value < self.N:
            self.assertEqual(0, nDropped.value)
            time.sleep(2)
            nDropped, nProcessed = self._get_single_sub_metrics(mss)

        self.assertEqual(0, nDropped.value)
        self.assertEqual(self.N, nProcessed.value)

    def check_single_sub_drop(self):
        """
        Check dropped plus processed accounts for every published tuple
        and that at least one tuple was dropped (buffer overflowed).
        """
        mss = self._get_single_sub_op()
        nDropped, nProcessed = self._get_single_sub_metrics(mss)
        while nDropped.value + nProcessed.value < self.N:
            time.sleep(2)
            nDropped, nProcessed = self._get_single_sub_metrics(mss)

        self.assertEqual(self.N, nDropped.value + nProcessed.value)
        self.assertTrue(nDropped.value > 0)

    def test_One2One(self):
        """Publish->Subscribe
        """
        N = 2466
        topic = random_topic()
        topo = Topology()

        # Subscriber
        s = self._subscribe(topo, topic)

        # Publisher
        self._publish(topo, N, topic)

        self.tester = Tester(topo)
        self.tester.run_for(15)
        self.N = N
        self.tester.tuple_count(s, N)
        self.tester.local_check = self.check_single_sub
        self.tester.test(self.test_ctxtype, self.test_config)

    def test_One2OneNonDirect(self):
        """Publish->Subscribe with a buffered subscriber.
        """
        N = 3252
        topic = random_topic()
        topo = Topology()

        # Subscriber
        s = self._subscribe(topo, topic, direct=False)

        # Publisher
        self._publish(topo, N, topic)

        self.tester = Tester(topo)
        self.tester.run_for(15)
        self.N = N
        self.tester.tuple_count(s, N)
        self.tester.local_check = self.check_single_sub
        self.tester.test(self.test_ctxtype, self.test_config)

    def test_One2OneNonDirectDropFirst(self):
        """Publish->Subscribe with a buffered subscriber.
        """
        N = 5032
        topic = random_topic()
        topo = Topology()

        # Subscriber
        s = self._subscribe(topo, topic, direct=False, drop='DropFirst')

        # Publisher
        self._publish(topo, N, topic)

        self.tester = Tester(topo)
        self.tester.run_for(15)
        self.N = N
        # 1000-2 for window & final mark
        self.tester.tuple_count(s, 998, exact=False)
        self.tester.local_check = self.check_single_sub_drop
        self.tester.test(self.test_ctxtype, self.test_config)

    def test_One2OneNonDirectDropLast(self):
        """Publish->Subscribe with a buffered subscriber.
        """
        N = 5032
        topic = random_topic()
        topo = Topology()

        # Subscriber
        s = self._subscribe(topo, topic, direct=False, drop='DropLast')

        # Publisher
        self._publish(topo, N, topic)

        self.tester = Tester(topo)
        self.tester.run_for(15)
        self.N = N
        self.tester.tuple_count(s, 1000, exact=False)
        self.tester.local_check = self.check_single_sub_drop
        self.tester.test(self.test_ctxtype, self.test_config)

    def test_UDPMany2One(self):
        """
        UDP publishers to a single subscriber.
        """
        N = 17342
        # Try a non-parallel and a width-5 publishing region.
        for pw in (1,5):
            topic = random_topic()
            topo = Topology()

            # Subscriber
            s = self._subscribe(topo, topic)

            # Publisher
            self._publish(topo, N, topic, width=pw)

            self.tester = Tester(topo)
            self.tester.run_for(15)
            self.tester.tuple_count(s, N)
            self.N = N
            self.tester.local_check = self.check_single_sub
            self.tester.test(self.test_ctxtype, self.test_config)

    def test_Many2One(self):
        """
        Many non-UDP publishers to a single subscriber.
        """
        N = 17342
        topic = random_topic()
        topo = Topology()

        # Subscriber
        s = self._subscribe(topo, topic)

        # Publisher
        M=3
        for i in range(M):
            self._publish(topo, N, topic)

        self.tester = Tester(topo)
        self.tester.run_for(15)
        self.tester.tuple_count(s, N*M)
        self.N = N*M
        self.tester.local_check = self.check_single_sub
        self.tester.test(self.test_ctxtype, self.test_config)

    def test_allow_filter_subscribe(self):
        # A publisher that allows filters still delivers everything to a
        # subscriber that pushes no filter upstream.
        N = 99
        topic = random_topic()
        topo = Topology()

        # Non-Filter Subscriber
        s = self._subscribe(topo, topic)

        # Publisher
        self._publish(topo, N, topic, allow_filter=True)

        self.tester = Tester(topo)
        self.tester.run_for(15)
        self.N = N
        self.tester.tuple_count(s, N)
        self.tester.local_check = self.check_single_sub
        self.tester.test(self.test_ctxtype, self.test_config)

    def test_allow_filter_filtered_subscribe(self):
        # Filtered subscribe must deliver only seq < 87 regardless of
        # whether the publisher allows filters (af) or the connection is
        # direct/buffered (d).
        N = 201
        F = 87
        pd = os.path.dirname(os.path.dirname(__file__))
        ttk = os.path.join(pd, 'testtk')

        for af,d in [(False,False), (False,True), (True,False), (True,True)]:
            #af = c[0]
            #d = c[1]

            topic = random_topic()
            topo = Topology()

            streamsx.spl.toolkit.add_toolkit(topo, ttk)

            extra = {}
            extra['remoteFilter'] = 'seq < 87'
            extra['localFilterFunction'] = op.Expression.expression('testspl::affs')
            s = self._subscribe(topo, topic, direct=d, filtered=True, extra=extra)

            sf = s.filter(check_lt_87)

            # Publisher
            self._publish(topo, N, topic, allow_filter=af)

            self.tester = Tester(topo)
            self.tester.run_for(15)
            self.N = F
            self.tester.tuple_count(s, F)
            self.tester.tuple_check(s, check_lt_87)
            #self.tester.local_check = self.check_single_sub
            self.tester.test(self.test_ctxtype, self.test_config)
class TestSasPubSub(TestPubSub):
    """Run the full TestPubSub suite against the Streaming Analytics service
    instead of a distributed instance (only setUp differs).
    """
    def setUp(self):
        Tester.setup_streaming_analytics(self)
| {
"content_hash": "b410a0e9757701520b69c9817490eebf",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 130,
"avg_line_length": 29.812297734627833,
"alnum_prop": 0.5687147199305254,
"repo_name": "ddebrunner/streamsx.topology",
"id": "77b6d40ead4dc869f85f571ec8eaf26484813ec5",
"size": "9293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/spl/tests/test_pubsub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3286"
},
{
"name": "C++",
"bytes": "200823"
},
{
"name": "HTML",
"bytes": "11056"
},
{
"name": "Java",
"bytes": "2117955"
},
{
"name": "Makefile",
"bytes": "7673"
},
{
"name": "Perl",
"bytes": "24683"
},
{
"name": "Python",
"bytes": "1712387"
},
{
"name": "Raku",
"bytes": "433"
},
{
"name": "Scala",
"bytes": "11007"
},
{
"name": "Shell",
"bytes": "14080"
}
],
"symlink_target": ""
} |
"""Tests related to django.db.backends that haven't been organized."""
import datetime
import threading
import unittest
import warnings
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper
from django.db.models.sql.constants import CURSOR
from django.test import (
TestCase, TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
Article, Object, ObjectReference, Person, Post, RawData, Reporter,
ReporterProxy, SchoolClass, Square,
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ,
)
class DateQuotingTest(TestCase):
    """Date functions must work on fields whose names clash with the lookup
    strings passed to them (e.g. 'year', 'day') (#12818)."""

    def test_django_date_trunc(self):
        """Truncating dates works on a field named like a lookup ('year')."""
        last_updated = datetime.datetime(2010, 2, 20)
        SchoolClass.objects.create(year=2009, last_updated=last_updated)
        self.assertEqual(
            list(SchoolClass.objects.dates('last_updated', 'year')),
            [datetime.date(2010, 1, 1)],
        )

    def test_django_date_extract(self):
        """Extracting date parts works on a field named like a lookup ('day')."""
        last_updated = datetime.datetime(2010, 2, 20)
        SchoolClass.objects.create(year=2009, last_updated=last_updated)
        matches = SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(matches), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    """Checks for DatabaseOperations.last_executed_query()."""

    def test_last_executed_query(self):
        """
        last_executed_query should not raise an exception even if no previous
        query has been run.
        """
        with connection.cursor() as cursor:
            connection.ops.last_executed_query(cursor, '', ())

    def test_debug_sql(self):
        """With DEBUG=True the executed SQL is recorded in connection.queries."""
        list(Reporter.objects.filter(first_name="test"))
        recorded_sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", recorded_sql)
        self.assertIn(Reporter._meta.db_table, recorded_sql)

    def test_query_encoding(self):
        """last_executed_query() returns a string."""
        qs = RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
        sql, params = qs.query.sql_with_params()
        cursor = qs.query.get_compiler('default').execute_sql(CURSOR)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertIsInstance(last_sql, str)
class ParameterHandlingTest(TestCase):

    def test_bad_parameter_count(self):
        "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
        table = connection.introspection.identifier_converter('backends_square')
        root_col = connection.ops.quote_name('root')
        square_col = connection.ops.quote_name('square')
        insert_sql = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (table, root_col, square_col)
        with connection.cursor() as cursor:
            # Too many values per row.
            with self.assertRaises(Exception):
                cursor.executemany(insert_sql, [(1, 2, 3)])
            # Too few values per row.
            with self.assertRaises(Exception):
                cursor.executemany(insert_sql, [(1,)])
class LongNameTest(TransactionTestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """
    available_apps = ['backends']

    def test_sequence_name_length_limits_create(self):
        """Test creation of model with long name and long pk name doesn't error. Ref #8901"""
        VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    def test_sequence_name_length_limits_m2m(self):
        """
        An m2m save of a model with a long name and a long m2m field name
        doesn't error (#8901).
        """
        obj = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        rel_obj = Person.objects.create(first_name='Django', last_name='Reinhardt')
        obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)

    def test_sequence_name_length_limits_flush(self):
        """
        Sequence resetting as part of a flush with model with long name and
        long pk name doesn't error (#8901).
        """
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually
        # Some convenience aliases
        VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [
            VLM._meta.db_table,
            VLM_m2m._meta.db_table,
        ]
        # One sequence entry: the auto PK of the long-named model.
        sequences = [
            {
                'column': VLM._meta.pk.column,
                'table': VLM._meta.db_table
            },
        ]
        # Generate the backend's flush SQL and execute it statement by
        # statement; the test passes if no statement errors.
        sql_list = connection.ops.sql_flush(no_style(), tables, sequences)
        with connection.cursor() as cursor:
            for statement in sql_list:
                cursor.execute(statement)
class SequenceResetTest(TestCase):

    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Insert a row whose PK is chosen explicitly, bypassing the sequence.
        Post.objects.create(id=10, name='1st post', text='hello world')

        # Ask the backend for its sequence-reset SQL and run it.
        reset_sql = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [Post])
        with connection.cursor() as cursor:
            for statement in reset_sql:
                cursor.execute(statement)

        # A sequence-assigned PK must now be past the manually chosen one.
        new_post = Post.objects.create(name='New post', text='goodbye world')
        self.assertGreater(new_post.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
    available_apps = []

    # Unfortunately with sqlite3 the in-memory test database cannot be closed,
    # and so it cannot be re-opened during testing.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        """connection_created fires on reconnect and stops after disconnect."""
        seen = {}

        # The parameter must be named `connection` — the signal sends it
        # by keyword (it shadows the module-level connection here).
        def receiver(sender, connection, **kwargs):
            seen["connection"] = connection
        connection_created.connect(receiver)

        connection.close()
        connection.cursor()
        self.assertIs(seen["connection"].connection, connection.connection)

        connection_created.disconnect(receiver)
        seen.clear()
        connection.cursor()
        self.assertEqual(seen, {})
class EscapingChecks(TestCase):
    """
    All tests in this test case are also run with settings.DEBUG=True in
    EscapingChecksDebug test case, to also test CursorDebugWrapper.
    """
    bare_select_suffix = connection.features.bare_select_suffix

    def test_paramless_no_escaping(self):
        # A literal '%s' with no params must not be treated as a placeholder.
        sql = "SELECT '%s'" + self.bare_select_suffix
        with connection.cursor() as cursor:
            cursor.execute(sql)
            self.assertEqual(cursor.fetchall()[0][0], '%s')

    def test_parameter_escaping(self):
        # '%%' is unescaped to '%' while the bound parameter passes through.
        sql = "SELECT '%%', %s" + self.bare_select_suffix
        with connection.cursor() as cursor:
            cursor.execute(sql, ('%d',))
            self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
    # Re-runs every EscapingChecks test with DEBUG=True so the SQL also goes
    # through CursorDebugWrapper.
    pass
class BackendTestCase(TransactionTestCase):
    """Assorted low-level backend checks: raw cursor APIs, parameter styles,
    connection.queries bookkeeping, and connection lifecycle behavior."""
    available_apps = ['backends']

    def create_squares_with_executemany(self, args):
        # Insert rows via executemany() using positional ('format') placeholders.
        self.create_squares(args, 'format', True)

    def create_squares(self, args, paramstyle, multiple):
        # Build and run a raw INSERT into backends_square with the requested
        # placeholder style ('format' -> %s, 'pyformat' -> %(name)s).
        opts = Square._meta
        tbl = connection.introspection.identifier_converter(opts.db_table)
        f1 = connection.ops.quote_name(opts.get_field('root').column)
        f2 = connection.ops.quote_name(opts.get_field('square').column)
        if paramstyle == 'format':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
        elif paramstyle == 'pyformat':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
        else:
            raise ValueError("unsupported paramstyle in test")
        with connection.cursor() as cursor:
            if multiple:
                cursor.executemany(query, args)
            else:
                cursor.execute(query, args)

    def test_cursor_executemany(self):
        # Test cursor.executemany #4896
        args = [(i, i ** 2) for i in range(-5, 6)]
        self.create_squares_with_executemany(args)
        self.assertEqual(Square.objects.count(), 11)
        for i in range(-5, 6):
            square = Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    def test_cursor_executemany_with_empty_params_list(self):
        # Test executemany with params=[] does nothing #4765
        args = []
        self.create_squares_with_executemany(args)
        self.assertEqual(Square.objects.count(), 0)

    def test_cursor_executemany_with_iterator(self):
        # Test executemany accepts iterators #10320
        args = ((i, i ** 2) for i in range(-3, 2))
        self.create_squares_with_executemany(args)
        self.assertEqual(Square.objects.count(), 5)

        args = ((i, i ** 2) for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares_with_executemany(args)
        self.assertEqual(Square.objects.count(), 9)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_execute_with_pyformat(self):
        # Support pyformat style passing of parameters #10070
        args = {'root': 3, 'square': 9}
        self.create_squares(args, 'pyformat', multiple=False)
        self.assertEqual(Square.objects.count(), 1)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat(self):
        # Support pyformat style passing of parameters #10070
        args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(Square.objects.count(), 11)
        for i in range(-5, 6):
            square = Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat_iterator(self):
        args = ({'root': i, 'square': i ** 2} for i in range(-3, 2))
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(Square.objects.count(), 5)

        args = ({'root': i, 'square': i ** 2} for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(Square.objects.count(), 9)

    def test_unicode_fetches(self):
        # fetchone, fetchmany, fetchall return strings as unicode objects #6254
        qn = connection.ops.quote_name
        Person(first_name="John", last_name="Doe").save()
        Person(first_name="Jane", last_name="Doe").save()
        Person(first_name="Mary", last_name="Agnelline").save()
        Person(first_name="Peter", last_name="Parker").save()
        Person(first_name="Clark", last_name="Kent").save()
        opts2 = Person._meta
        f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
        with connection.cursor() as cursor:
            cursor.execute(
                'SELECT %s, %s FROM %s ORDER BY %s' % (
                    qn(f3.column),
                    qn(f4.column),
                    connection.introspection.identifier_converter(opts2.db_table),
                    qn(f3.column),
                )
            )
            self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
            self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
            self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])

    def test_unicode_password(self):
        # Connecting with a non-ASCII password must not blow up with an
        # encoding error; a plain auth failure (DatabaseError) is acceptable.
        old_password = connection.settings_dict['PASSWORD']
        connection.settings_dict['PASSWORD'] = "françois"
        try:
            connection.cursor()
        except DatabaseError:
            # As password is probably wrong, a database exception is expected
            pass
        except Exception as e:
            self.fail("Unexpected error raised with unicode password: %s" % e)
        finally:
            connection.settings_dict['PASSWORD'] = old_password

    def test_database_operations_helper_class(self):
        # Ticket #13630
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)

    def test_database_operations_init(self):
        """
        DatabaseOperations initialization doesn't query the database.
        See #17656.
        """
        with self.assertNumQueries(0):
            connection.ops.__class__(connection)

    def test_cached_db_features(self):
        # Cached properties resolve to concrete booleans, not lazy objects.
        self.assertIn(connection.features.supports_transactions, (True, False))
        self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))

    def test_duplicate_table_error(self):
        """ Creating an existing table returns a DatabaseError """
        query = 'CREATE TABLE %s (id INTEGER);' % Article._meta.db_table
        with connection.cursor() as cursor:
            with self.assertRaises(DatabaseError):
                cursor.execute(query)

    def test_cursor_contextmanager(self):
        """
        Cursors can be used as a context manager
        """
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing closed cursor (psycopg2 has InterfaceError, rest seem
        # to use ProgrammingError).
        with self.assertRaises(connection.features.closed_cursor_error_class):
            # cursor should be closed, so no queries should be possible.
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)

    @unittest.skipUnless(connection.vendor == 'postgresql',
                         "Psycopg2 specific cursor.closed attribute needed")
    def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 offers us a way to check that by closed attribute.
        # So, run only on psycopg2 for that reason.
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        self.assertTrue(cursor.closed)

    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_is_usable_after_database_disconnects(self):
        """
        is_usable() doesn't crash when the database disconnects (#21553).
        """
        # Open a connection to the database.
        with connection.cursor():
            pass
        # Emulate a connection close by the database.
        connection._close()
        # Even then is_usable() should not raise an exception.
        try:
            self.assertFalse(connection.is_usable())
        finally:
            # Clean up the mess created by connection._close(). Since the
            # connection is already closed, this crashes on some backends.
            try:
                connection.close()
            except Exception:
                pass

    @override_settings(DEBUG=True)
    def test_queries(self):
        """
        Test the documented API of connection.queries.
        """
        with connection.cursor() as cursor:
            reset_queries()
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
        self.assertEqual(1, len(connection.queries))
        self.assertIsInstance(connection.queries, list)
        self.assertIsInstance(connection.queries[0], dict)
        self.assertCountEqual(connection.queries[0], ['sql', 'time'])

        reset_queries()
        self.assertEqual(0, len(connection.queries))

    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    @override_settings(DEBUG=True)
    def test_queries_limit(self):
        """
        The backend doesn't store an unlimited number of queries (#12581).
        """
        old_queries_limit = BaseDatabaseWrapper.queries_limit
        BaseDatabaseWrapper.queries_limit = 3
        new_connection = connection.copy()

        # Initialize the connection and clear initialization statements.
        with new_connection.cursor():
            pass
        new_connection.queries_log.clear()

        try:
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)

            # Below the limit: no truncation warning is emitted.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(2, len(new_connection.queries))
                self.assertEqual(0, len(w))

            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)

            # Over the limit: only the most recent queries are kept and a
            # warning announces the truncation.
            msg = "Limit for query logging exceeded, only the last 3 queries will be returned."
            with self.assertWarnsMessage(UserWarning, msg):
                self.assertEqual(3, len(new_connection.queries))

        finally:
            # Restore the class-level limit even if assertions failed.
            BaseDatabaseWrapper.queries_limit = old_queries_limit
            new_connection.close()

    def test_timezone_none_use_tz_false(self):
        # init_connection_state() must tolerate TIME_ZONE=None with USE_TZ off.
        connection.ensure_connection()
        with self.settings(TIME_ZONE=None, USE_TZ=False):
            connection.init_connection_state()
# These tests aren't conditional because it would require differentiating
# between MySQL+InnoDB and MySQL+MYISAM (something we currently can't do).
class FkConstraintsTests(TransactionTestCase):
    """Foreign-key constraint enforcement and the APIs for temporarily
    disabling/re-checking constraints. Reporter id 30 never exists, so
    assigning it always violates the FK."""
    available_apps = ['backends']

    def setUp(self):
        # Create a Reporter.
        self.r = Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        a1 = Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy Refs #17519
        a2 = Article(
            headline='This is another test', reporter=self.r,
            pub_date=datetime.datetime(2012, 8, 3),
            reporter_proxy_id=30,
        )
        with self.assertRaises(IntegrityError):
            a2.save()

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
        a1 = Article.objects.get(headline="Test article")
        a1.reporter_id = 30
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy Refs #17519
        # Create another article
        r_proxy = ReporterProxy.objects.get(pk=self.r.pk)
        Article.objects.create(
            headline='Another article',
            pub_date=datetime.datetime(1988, 5, 15),
            reporter=self.r, reporter_proxy=r_proxy,
        )
        # Retrieve the second article from the DB
        a2 = Article.objects.get(headline='Another article')
        a2.reporter_proxy_id = 30
        with self.assertRaises(IntegrityError):
            a2.save()

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data
        without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            Article.objects.create(
                headline="Test article",
                pub_date=datetime.datetime(2010, 9, 4),
                reporter=self.r,
            )
            # Retrieve it from the DB
            a = Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                connection.disable_constraint_checking()
                a.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            # Roll back so the bad row never persists past the test.
            transaction.set_rollback(True)

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be
        able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            Article.objects.create(
                headline="Test article",
                pub_date=datetime.datetime(2010, 9, 4),
                reporter=self.r,
            )
            # Retrieve it from the DB
            a = Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            # Roll back so the bad row never persists past the test.
            transaction.set_rollback(True)

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in the DB.
        """
        with transaction.atomic():
            # Create an Article.
            Article.objects.create(
                headline="Test article",
                pub_date=datetime.datetime(2010, 9, 4),
                reporter=self.r,
            )
            # Retrieve it from the DB
            a = Article.objects.get(headline="Test article")
            a.reporter_id = 30
            with connection.constraint_checks_disabled():
                a.save()
                # Re-running the checks must surface the bad row.
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
    """Database connections must be correctly scoped and shared per-thread."""
    # Restrict app loading for these transactional tests.
    available_apps = ['backends']

    def test_default_connection_thread_local(self):
        """
        The default connection (i.e. django.db.connection) is different for
        each thread (#17258).
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        connection.cursor()
        connections_dict[id(connection)] = connection

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.inc_thread_sharing()
            connection.cursor()
            connections_dict[id(connection)] = connection
        try:
            # Two worker threads, each expected to get its own connection.
            for x in range(2):
                t = threading.Thread(target=runner)
                t.start()
                t.join()
            # Each created connection got different inner connection.
            self.assertEqual(len({conn.connection for conn in connections_dict.values()}), 3)
        finally:
            # Finish by closing the connections opened by the other threads
            # (the connection opened in the main thread will automatically be
            # closed on teardown).
            for conn in connections_dict.values():
                if conn is not connection:
                    if conn.allow_thread_sharing:
                        conn.close()
                        conn.dec_thread_sharing()

    def test_connections_thread_local(self):
        """
        The connections are different for each thread (#17258).
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        for conn in connections.all():
            connections_dict[id(conn)] = conn

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.inc_thread_sharing()
                connections_dict[id(conn)] = conn
        try:
            for x in range(2):
                t = threading.Thread(target=runner)
                t.start()
                t.join()
            # Main thread plus two workers, each with its own connection set.
            self.assertEqual(len(connections_dict), 6)
        finally:
            # Finish by closing the connections opened by the other threads
            # (the connection opened in the main thread will automatically be
            # closed on teardown).
            for conn in connections_dict.values():
                if conn is not connection:
                    if conn.allow_thread_sharing:
                        conn.close()
                        conn.dec_thread_sharing()

    def test_pass_connection_between_threads(self):
        """
        A connection can be passed from one thread to the other (#17258).
        """
        Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()
        # Without touching thread sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # After calling inc_thread_sharing() on the connection.
        connections['default'].inc_thread_sharing()
        try:
            exceptions = []
            do_thread()
            # All good
            self.assertEqual(exceptions, [])
        finally:
            connections['default'].dec_thread_sharing()

    def test_closing_non_shared_connections(self):
        """
        A connection that is not explicitly shareable cannot be closed by
        another thread (#17258).
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()
        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)
        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].inc_thread_sharing()
            try:
                t2 = threading.Thread(target=runner2, args=[connections['default']])
                t2.start()
                t2.join()
            finally:
                connections['default'].dec_thread_sharing()
        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)

    def test_thread_sharing_count(self):
        # allow_thread_sharing is backed by a counter: it stays True until
        # every inc_thread_sharing() is matched by a dec_thread_sharing().
        self.assertIs(connection.allow_thread_sharing, False)
        connection.inc_thread_sharing()
        self.assertIs(connection.allow_thread_sharing, True)
        connection.inc_thread_sharing()
        self.assertIs(connection.allow_thread_sharing, True)
        connection.dec_thread_sharing()
        self.assertIs(connection.allow_thread_sharing, True)
        connection.dec_thread_sharing()
        self.assertIs(connection.allow_thread_sharing, False)
        # Decrementing past zero is a programming error and must raise.
        msg = 'Cannot decrement the thread sharing count below zero.'
        with self.assertRaisesMessage(RuntimeError, msg):
            connection.dec_thread_sharing()
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for autoincrement primary key.
    """
    @skipIfDBFeature('allows_auto_pk_0')
    def test_zero_as_autoval(self):
        # On backends that forbid pk=0 for AutoFields, Django raises
        # ValueError before the INSERT is even attempted.
        with self.assertRaises(ValueError):
            Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
    """Behavior of FK/M2M references with and without matching target rows."""

    def test_can_reference_existent(self):
        target = Object.objects.create()
        reference = ObjectReference.objects.create(obj=target)
        self.assertEqual(reference.obj, target)
        # The relation survives a round-trip through the database.
        reference = ObjectReference.objects.get(obj=target)
        self.assertEqual(reference.obj, target)

    def test_can_reference_non_existent(self):
        self.assertFalse(Object.objects.filter(id=12345).exists())
        reference = ObjectReference.objects.create(obj_id=12345)
        fetched = ObjectReference.objects.get(obj_id=12345)
        self.assertEqual(reference, fetched)
        # Following the dangling FK raises DoesNotExist.
        with self.assertRaises(Object.DoesNotExist):
            reference.obj

    def test_many_to_many(self):
        obj = Object.objects.create()
        obj.related_objects.create()
        self.assertEqual(Object.objects.count(), 2)
        self.assertEqual(obj.related_objects.count(), 1)
        # Insert a dangling row directly into the through table.
        through = Object._meta.get_field("related_objects").remote_field.through
        through.objects.create(from_object_id=obj.id, to_object_id=12345)
        # The dangling row is invisible via the descriptor...
        self.assertEqual(obj.related_objects.count(), 1)
        # ...but present in the through table itself.
        self.assertEqual(through.objects.count(), 2)
| {
"content_hash": "12eda7b7f2f10e2ab6864fe415c22d6f",
"timestamp": "",
"source": "github",
"line_count": 795,
"max_line_length": 112,
"avg_line_length": 39.9496855345912,
"alnum_prop": 0.6163098236775819,
"repo_name": "charettes/django",
"id": "6138a3626c60505cd4ad9aaffe2d820f4464b44b",
"size": "31763",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/backends/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85024"
},
{
"name": "HTML",
"bytes": "224332"
},
{
"name": "JavaScript",
"bytes": "251339"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13018015"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import re
import geocoder
from six import string_types
class Location(object):
    """ Location container

    Extracts a (lat, lng) pair from any of:
      - a "lat, lng" string, or a free-form address string (geocoded via
        the provider named by the ``provider`` keyword, default 'osm')
      - a list or tuple of two numbers
      - a dict with 'lat'/'lng' (or 'y'/'x') keys
      - any object exposing a ``latlng`` attribute (e.g. a geocoder result)

    Raises ValueError for unrecognised input types or out-of-range
    coordinates; a failed geocode simply leaves ``ok`` False.
    """
    lat = None
    lng = None

    def __init__(self, location, **kwargs):
        self.location = location
        self.kwargs = kwargs
        self._check_input(location)

    @property
    def ok(self):
        """True when a valid latitude/longitude pair was extracted."""
        return bool(self.latlng)

    def _convert_float(self, number):
        """Return *number* as a float, or None when it is not numeric."""
        try:
            return float(number)
        except (TypeError, ValueError):
            # TypeError covers None and other non-numeric objects, which
            # previously escaped the ValueError-only handler and crashed
            # _check_for_list() instead of producing the proper
            # "Coordinates must be numbers" error.
            return None

    def _check_input(self, location):
        """Dispatch on the location's type and populate self.lat/self.lng."""
        # Checking for a LatLng String
        if isinstance(location, string_types):
            expression = r"[-]?\d+[.]?[-]?[\d]+"
            pattern = re.compile(expression)
            match = pattern.findall(location)
            if len(match) == 2:
                lat, lng = match
                self._check_for_list([lat, lng])
            else:
                # Check for string to Geocode using a provider
                provider = self.kwargs.get('provider', 'osm')
                g = geocoder.get(location, provider=provider)
                if g.ok:
                    self.lat, self.lng = g.lat, g.lng
        # Checking for List of Tuple
        elif isinstance(location, (list, tuple)):
            self._check_for_list(location)
        # Checking for Dictionary
        elif isinstance(location, dict):
            self._check_for_dict(location)
        # Checking for a Geocoder Class
        elif hasattr(location, 'latlng'):
            if location.latlng:
                self.lat, self.lng = location.latlng
        # Result into Error
        else:
            raise ValueError("Unknown location: %s" % location)

    def _check_for_list(self, location):
        """Validate a 2-element sequence of coordinates and store them.

        Silently does nothing when the sequence is not of length 2,
        matching the historical best-effort behavior.
        """
        # Standard LatLng list or tuple with 2 number values
        if len(location) == 2:
            lat = self._convert_float(location[0])
            lng = self._convert_float(location[1])
            condition_1 = isinstance(lat, float)
            condition_2 = isinstance(lng, float)
            # Check if input are Floats
            if condition_1 and condition_2:
                condition_3 = -90 <= lat <= 90
                condition_4 = -180 <= lng <= 180
                # Check if inputs are within the World Geographical
                # boundary (90,180,-90,-180)
                if condition_3 and condition_4:
                    self.lat = lat
                    self.lng = lng
                    return self.lat, self.lng
                else:
                    raise ValueError("Coords are not within the world's geographical boundary")
            else:
                raise ValueError("Coordinates must be numbers")

    def _check_for_dict(self, location):
        """Extract coordinates from a dict keyed 'lat'/'lng' or 'y'/'x'.

        Note the 'y'/'x' check deliberately runs second and can override
        the 'lat'/'lng' values when both key pairs are present.
        """
        # Standard LatLng list or tuple with 2 number values
        if 'lat' in location and 'lng' in location:
            lat = location['lat']
            lng = location['lng']
            self._check_for_list([lat, lng])
        if 'y' in location and 'x' in location:
            lat = location['y']
            lng = location['x']
            self._check_for_list([lat, lng])

    @property
    def latlng(self):
        """[lat, lng] when both coordinates are set, else an empty list."""
        if isinstance(self.lat, float) and isinstance(self.lng, float):
            return [self.lat, self.lng]
        return []

    @property
    def latitude(self):
        return self.lat

    @property
    def longitude(self):
        return self.lng

    @property
    def xy(self):
        """[lng, lat] (x, y order) when both coordinates are set."""
        if isinstance(self.lat, float) and isinstance(self.lng, float):
            return [self.lng, self.lat]
        return []

    def __str__(self):
        if self.ok:
            return '{0}, {1}'.format(self.lat, self.lng)
        return ''
if __name__ == '__main__':
    # Tiny smoke test: a valid null-island coordinate pair.
    sample = Location([0.0, 0.0])
    print(sample.lng)
| {
"content_hash": "940a52121ab28c11a1f7413c4940db44",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 95,
"avg_line_length": 30.688524590163933,
"alnum_prop": 0.532051282051282,
"repo_name": "minimedj/geocoder",
"id": "b3300bb43a4487f8cc9af71021f082c42014bd6c",
"size": "3778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocoder/location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Python",
"bytes": "124959"
}
],
"symlink_target": ""
} |
import icat.client

# Base URL of the ICAT server used by this tutorial example.
url = "https://icat.example.com:8181"
# checkCert=False disables TLS certificate verification (for servers with
# self-signed certificates) — not suitable for production use.
client = icat.client.Client(url, checkCert=False)
print("Connect to %s\nICAT version %s" % (url, client.apiversion))
| {
"content_hash": "f35186a872ff70ed92b2d39531133f99",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 66,
"avg_line_length": 35,
"alnum_prop": 0.7314285714285714,
"repo_name": "icatproject/python-icat",
"id": "95b81b302bd042c274b44deb431741f3d048aac2",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "doc/tutorial/hello-nocert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "626"
},
{
"name": "Python",
"bytes": "467674"
}
],
"symlink_target": ""
} |
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the owner may write."""

    def has_object_permission(self, request, view, obj):
        """Allow safe (read-only) methods for all; writes only for the owner."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.owner == request.user
| {
"content_hash": "2678344d170e6b2e572c85475f9bca40",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 40.38461538461539,
"alnum_prop": 0.7028571428571428,
"repo_name": "pyphonic/Pyphon",
"id": "8639ec1d8b456a3f149e52777445e340b5cad894",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "pyphon/api/permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12055"
},
{
"name": "HTML",
"bytes": "26149"
},
{
"name": "JavaScript",
"bytes": "40316"
},
{
"name": "Python",
"bytes": "86587"
}
],
"symlink_target": ""
} |
from reporter.core import SqlReport
from reporter.emailing import RECIPIENT_IT_DWH
from reporter.uhl_reports.civicrm import get_contact_id_search_link
from reporter.connections import get_redcap_link
class NationalBioresourceNotInCivicrm(SqlReport):
    # Data-quality report: fully-consented National Bioresource REDCap
    # records (project 9) with no matching case-type-27 record in CiviCRM.
    def __init__(self):
        super().__init__(
            introduction=("The following participants have "
                          "a record in REDCap, but do not have "
                          "a record in CiviCRM"),
            recipients=[RECIPIENT_IT_DWH],
            sql='''
                SELECT record
                FROM STG_redcap.dbo.all_projects_fully_consented redcap
                JOIN [STG_CiviCRM].[dbo].[LCBRU_CaseDetails] civi
                    ON civi.case_type_id = 21
                    AND civi.StudyNumber = 'National Bioresource'
                    AND civi.is_recruited = 1
                WHERE project_id = 9
                    AND NOT EXISTS (
                        SELECT 1
                        FROM [STG_CiviCRM].[dbo].[LCBRU_CaseDetails] civi
                        WHERE civi.case_type_id = 27
                            AND ( civi.StudyNumber2 = redcap.record
                                OR civi.StudyNumber3 = redcap.record)
                    )
            '''
        )

    def get_report_line(self, row):
        # One bullet per record: a REDCap link plus a CiviCRM contact search.
        # NOTE(review): the first argument here is the literal 'CiviCRM',
        # while the sibling report passes the study number — confirm against
        # get_contact_id_search_link's signature which is intended.
        return '- {}; {}\r\n'.format(
            get_redcap_link(
                row['record'],
                9,
                row['record'],
            ),
            get_contact_id_search_link(
                'CiviCRM',
                row['record'],
            ),
        )
class NationalCivicrmNotInBioresource(SqlReport):
    # The inverse cross-check: CiviCRM cases (type 27) whose study numbers
    # have no matching fully-consented REDCap record in project 9.
    def __init__(self):
        super().__init__(
            introduction=("The following participants have "
                          "a record in CiviCRM, but do not have "
                          "a record in REDCap"),
            recipients=[RECIPIENT_IT_DWH],
            sql='''
                DECLARE @redcap TABLE
                (
                    record VARCHAR(20) PRIMARY KEY
                )
                INSERT INTO @redcap(record)
                SELECT DISTINCT record
                FROM STG_redcap.dbo.all_projects_fully_consented redcap
                WHERE redcap.project_id = 9
                SELECT StudyNumber
                FROM [STG_CiviCRM].[dbo].[LCBRU_CaseDetails] civi
                WHERE civi.case_type_id = 27
                    AND NOT EXISTS (
                        SELECT 1
                        FROM @redcap redcap
                        WHERE (StudyNumber2 = redcap.record
                            OR civi.StudyNumber3 = redcap.record)
                    )
            '''
        )

    def get_report_line(self, row):
        # One bullet per orphaned CiviCRM study number.
        return '- {}\r\n'.format(
            get_contact_id_search_link(
                row['StudyNumber'],
                row['StudyNumber'],
            ))
| {
"content_hash": "a435d7b2bc30c8719bd0592aa21f02f1",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 67,
"avg_line_length": 28.655172413793103,
"alnum_prop": 0.5451263537906137,
"repo_name": "LCBRU/reporter",
"id": "151e163c6f57153da7002c4f90f02dd3cb3ed74f",
"size": "2517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporter/uhl_reports/national_bioresource/data_quality/civi_redcap_xref.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "386"
},
{
"name": "HTML",
"bytes": "3199"
},
{
"name": "Python",
"bytes": "600192"
}
],
"symlink_target": ""
} |
"""Defines Fairness Indicator's Jupyter notebook widgets."""
import ipywidgets as widgets
from tensorflow_model_analysis.version import VERSION
import traitlets
from traitlets import List
from traitlets import Unicode
@widgets.register
class FairnessIndicatorViewer(widgets.DOMWidget):
    """The fairness indicator visualization widget."""
    # Traitlets pairing this Python model with the JS view/model of the same
    # names in the 'tensorflow_model_analysis' frontend module.
    _view_name = Unicode('FairnessIndicatorView').tag(sync=True)
    _view_module = Unicode('tensorflow_model_analysis').tag(sync=True)
    _view_module_version = Unicode(VERSION).tag(sync=True)
    _model_name = Unicode('FairnessIndicatorModel').tag(sync=True)
    _model_module = Unicode('tensorflow_model_analysis').tag(sync=True)
    _model_module_version = Unicode(VERSION).tag(sync=True)
    # Evaluation results (plus an optional second eval for comparison),
    # synced to the browser for rendering.
    slicingMetrics = List().tag(sync=True)
    slicingMetricsCompare = List().tag(sync=True)
    evalName = Unicode().tag(sync=True)
    evalNameCompare = Unicode().tag(sync=True)
    # Used for handling on the js side.
    # NOTE(review): this is a class-level dict, shared by all viewer
    # instances unless an instance assigns its own dict — confirm that is
    # intended before registering per-instance handlers.
    eventHandlers = {}
    # Event queue filled from the JS side; drained by _handle_js_events.
    js_events = List([]).tag(sync=True)

    @traitlets.observe('js_events')
    def _handle_js_events(self, change):
        """Dispatch queued JS events to registered Python handlers."""
        if self.js_events:
            if self.eventHandlers:
                for event in self.js_events:
                    event_name = event['name']
                    if event_name in self.eventHandlers:
                        self.eventHandlers[event_name](event['detail'])
            # clears the event queue.
            self.js_events = []
| {
"content_hash": "20c9f468f48b9a28b6eae8482c08fec8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 36.729729729729726,
"alnum_prop": 0.7159676232523915,
"repo_name": "tensorflow/model-analysis",
"id": "d5e6b72ce79e2c8279fa5933077ad2fc73090a0c",
"size": "1934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_model_analysis/addons/fairness/notebook/jupyter/widget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "125312"
},
{
"name": "JavaScript",
"bytes": "1415355"
},
{
"name": "Python",
"bytes": "3261298"
},
{
"name": "Shell",
"bytes": "813"
},
{
"name": "Starlark",
"bytes": "11590"
}
],
"symlink_target": ""
} |
""" File containing information about which treatment stage Pyccel is executing
"""
from .metaclasses import Singleton
class PyccelStage(metaclass = Singleton):
    """ Singleton wrapping the treatment stage Pyccel is executing.

    The stage is one of:
    - syntactic
    - semantic
    - codegen
    and compares equal to the matching string. When Pyccel is not
    executing, the stage is None.
    """
    # The only stages set_stage() accepts.
    _VALID_STAGES = ('syntactic', 'semantic', 'codegen')

    def __init__(self):
        self._stage = None

    def set_stage(self, stage):
        """ Set the current treatment stage.

        Parameters
        ----------
        stage : str
            One of 'syntactic', 'semantic' or 'codegen'.

        Raises
        ------
        ValueError
            If stage is not a recognised treatment stage.
        """
        # Raise a real exception instead of using `assert`, which is
        # silently stripped when Python runs with the -O flag.
        if stage not in self._VALID_STAGES:
            raise ValueError("Unknown Pyccel stage: {}".format(stage))
        self._stage = stage

    def __eq__(self, other):
        # Compares equal to the stage string itself (or to None).
        return self._stage == other

    def __repr__(self):
        return 'PyccelStage({})'.format(self._stage)

    def pyccel_finished(self):
        """ Indicate that Pyccel has finished running and reset stage to None.
        """
        self._stage = None
| {
"content_hash": "c72730d274fac501d4b39c5c3ea6acc4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 28.517241379310345,
"alnum_prop": 0.6287787182587666,
"repo_name": "ratnania/pyccel",
"id": "1934aceb519e4b666b188e0875eabab572c7eea7",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyccel/utilities/stage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "111665"
},
{
"name": "Python",
"bytes": "863199"
},
{
"name": "Shell",
"bytes": "712"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, user API keys, groups, roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
choices: ["user", "group", "role"]
name:
description:
- Name of IAM resource to create or identify
required: true
new_name:
description:
- When state is update, will replace name with new_name on IAM resource
new_path:
description:
- When state is update, will replace the path with new_path on the IAM resource
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
choices: [ "present", "absent", "update" ]
path:
description:
- When creating or updating, specify the desired path of the resource. If state is present,
it will replace the current path to match what is passed in when they do not match.
default: "/"
trust_policy:
description:
- The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy_filepath).
version_added: "2.2"
trust_policy_filepath:
description:
- The path to the trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy).
version_added: "2.2"
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
choices: [ "create", "remove", "active", "inactive"]
key_count:
description:
- When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1.
default: '1'
access_key_ids:
description:
- A list of the keys that you want impacted by the access_key_state parameter.
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
password:
description:
- When type is user and state is present, define the users login password. Also works with update. Note that always returns changed.
update_password:
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will error out if your
user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
loop:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
loop:
- Mario
- Luigi
register: new_groups
- name:
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
loop: "{{ new_groups.results }}"
# Example of role with custom trust policy for Lambda service
- name: Create IAM role with custom trust relationship
iam:
iam_type: role
name: AAALambdaTestRole
state: present
trust_policy:
Version: '2012-10-17'
Statement:
- Action: sts:AssumeRole
Effect: Allow
Principal:
Service: lambda.amazonaws.com
'''
RETURN = '''
role_result:
description: the IAM.role dict returned by Boto
type: str
returned: if iam_type=role and state=present
sample: {
"arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role",
"assume_role_policy_document": "...truncated...",
"create_date": "2017-09-02T14:32:23Z",
"path": "/",
"role_id": "AROAA1B2C3D4E5F6G7H8I",
"role_name": "my-new-role"
}
roles:
description: a list containing the name of the currently defined roles
type: list
returned: if iam_type=role and state=present
sample: [
"my-new-role",
"my-existing-role-1",
"my-existing-role-2",
"my-existing-role-3",
"my-existing-role-...",
]
'''
import json
import traceback
try:
import boto.exception
import boto.iam
import boto.iam.connection
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def _paginate(func, attr):
'''
paginates the results from func by continuously passing in
the returned marker if the results were truncated. this returns
an iterator over the items in the returned response. `attr` is
the name of the attribute to iterate over in the response.
'''
finished, marker = False, None
while not finished:
res = func(marker=marker)
for item in getattr(res, attr):
yield item
finished = res.is_truncated == 'false'
if not finished:
marker = res.marker
def list_all_groups(iam):
    """Return the name of every IAM group, following result pagination."""
    return [group['group_name'] for group in _paginate(iam.get_all_groups, 'groups')]
def list_all_users(iam):
    """Return the user name of every IAM user, following result pagination."""
    return [user['user_name'] for user in _paginate(iam.get_all_users, 'users')]
def list_all_roles(iam):
    """Return the name of every IAM role, following result pagination."""
    return [role['role_name'] for role in _paginate(iam.list_roles, 'roles')]
def list_all_instance_profiles(iam):
    """Return the name of every instance profile, following result pagination."""
    return [profile['instance_profile_name'] for profile in _paginate(iam.list_instance_profiles, 'instance_profiles')]
def create_user(module, iam, name, pwd, path, key_state, key_count):
    """Create an IAM user, optionally with a login password and access keys.

    Returns ``(user_info, changed)`` where ``user_info`` contains the
    created user, its login profile (if any) and any new access keys.
    On boto failure the module exits via ``fail_json``.
    """
    key_qty = 0
    keys = []
    try:
        user_meta = iam.create_user(
            name, path).create_user_response.create_user_result.user
        changed = True
        if pwd is not None:
            # NOTE(review): pwd is rebound to the create_login_profile
            # response, so the returned dict carries the profile object
            # rather than the plaintext password.
            pwd = iam.create_login_profile(name, pwd)
        if key_state in ['create']:
            if key_count:
                # Create exactly key_count access keys for the new user.
                while key_count > key_qty:
                    keys.append(iam.create_access_key(
                        user_name=name).create_access_key_response.
                        create_access_key_result.
                        access_key)
                    key_qty += 1
            else:
                keys = None
    except boto.exception.BotoServerError as err:
        # fail_json() exits the module, so user_info/changed being unbound
        # on this path never reaches the return statement below.
        module.fail_json(changed=False, msg=str(err))
    else:
        user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
    return (user_info, changed)
def delete_dependencies_first(module, iam, name):
    """Strip a user's dependent resources so the user itself can be deleted.

    Removes (in order) access keys, the login profile, inline policies and
    MFA device associations. Returns True when anything was removed; any
    unexpected boto failure aborts the module via ``fail_json``.
    """
    changed = False
    # Delete all API access keys. `changed` is set per deletion so a user
    # with no keys does not spuriously report a change.
    try:
        current_keys = [ck['access_key_id'] for ck in
                        iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
        for key in current_keys:
            iam.delete_access_key(key, name)
            changed = True
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc())
    # Delete the console login profile; "cannot be found" simply means the
    # user never had one and is not an error.
    try:
        iam.get_login_profiles(name).get_login_profile_response
        iam.delete_login_profile(name)
        changed = True
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg:
            module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc())
    # Delete all inline policies. Managed policies cannot be detached by
    # boto, so that case produces an explicit, actionable failure.
    try:
        for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
            iam.delete_user_policy(name, policy)
            changed = True
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        if 'must detach all policies first' in error_msg:
            module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                 "that %s has Managed Policies. This is not "
                                                 "currently supported by boto. Please detach the policies "
                                                 "through the console and try again." % name)
        module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc())
    # Deactivate any MFA devices associated with the user.
    try:
        mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', [])
        for device in mfa_devices:
            iam.deactivate_mfa_device(name, device['serial_number'])
            changed = True
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc())
    return changed
def delete_user(module, iam, name):
    """Delete IAM user *name*, stripping dependent resources first.

    Returns ``(name, changed)``; failures exit via ``module.fail_json``.
    """
    # Keys, login profile, inline policies and MFA devices must be removed
    # before IAM will accept the user deletion.
    changed = delete_dependencies_first(module, iam, name)
    try:
        iam.delete_user(name)
    except boto.exception.BotoServerError as ex:
        module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc())
    else:
        changed = True
    return name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
    """Apply name/path, password and access-key changes to an IAM user.

    Returns ``(name_change, updated_key_list, changed, new_keys)`` where
    ``updated_key_list`` maps every remaining access key id to its status
    and ``new_keys`` holds any freshly created access keys.
    """
    changed = False
    name_change = False
    if updated and new_name:
        name = new_name
    # Fetch the user's current access keys. After a rename the old name may
    # no longer resolve, hence the "cannot be found" fallback to new_name.
    try:
        current_keys = [ck['access_key_id'] for ck in
                        iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
        status = [ck['status'] for ck in
                  iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
        key_qty = len(current_keys)
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        if 'cannot be found' in error_msg and updated:
            current_keys = [ck['access_key_id'] for ck in
                            iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
            status = [ck['status'] for ck in
                      iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
            name = new_name
        else:
            module.fail_json(changed=False, msg=str(err))
    updated_key_list = {}
    # Rename and/or move the user when requested.
    if new_name or new_path:
        c_path = iam.get_user(name).get_user_result.user['path']
        if (name != new_name) or (c_path != new_path):
            changed = True
            try:
                if not updated:
                    user = iam.update_user(
                        name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
                else:
                    user = iam.update_user(
                        name, new_path=new_path).update_user_response.response_metadata
                user['updates'] = dict(
                    old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)  # NOTE(review): unused; fail_json reports str(err)
                module.fail_json(changed=False, msg=str(err))
            else:
                if not updated:
                    name_change = True
    # Set the login password: try updating an existing profile first, and
    # create one when the user does not have a profile yet.
    if pwd:
        try:
            iam.update_login_profile(name, pwd)
            changed = True
        except boto.exception.BotoServerError:
            try:
                iam.create_login_profile(name, pwd)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(str(err))
                if 'Password does not conform to the account password policy' in error_msg:
                    module.fail_json(changed=False, msg="Password doesn't conform to policy")
                else:
                    module.fail_json(msg=error_msg)
    # NOTE(review): this duplicates the key lookup at the top of the
    # function; it re-reads key state after a possible rename above.
    try:
        current_keys = [ck['access_key_id'] for ck in
                        iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
        status = [ck['status'] for ck in
                  iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
        key_qty = len(current_keys)
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        if 'cannot be found' in error_msg and updated:
            current_keys = [ck['access_key_id'] for ck in
                            iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
            status = [ck['status'] for ck in
                      iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
            name = new_name
        else:
            module.fail_json(changed=False, msg=str(err))
    new_keys = []
    if key_state == 'create':
        # Top the user up to key_count access keys.
        try:
            while key_count > key_qty:
                new_keys.append(iam.create_access_key(
                    user_name=name).create_access_key_response.create_access_key_result.access_key)
                key_qty += 1
            changed = True
        except boto.exception.BotoServerError as err:
            module.fail_json(changed=False, msg=str(err))
    # Activate/deactivate/remove only the keys explicitly supplied.
    if keys and key_state:
        for access_key in keys:
            if key_state in ('active', 'inactive'):
                if access_key in current_keys:
                    for current_key, current_key_state in zip(current_keys, status):
                        if key_state != current_key_state.lower():
                            try:
                                iam.update_access_key(access_key, key_state.capitalize(), user_name=name)
                                changed = True
                            except boto.exception.BotoServerError as err:
                                module.fail_json(changed=False, msg=str(err))
                else:
                    module.fail_json(msg="Supplied keys not found for %s. "
                                         "Current keys: %s. "
                                         "Supplied key(s): %s" %
                                         (name, current_keys, keys)
                                     )
            if key_state == 'remove':
                if access_key in current_keys:
                    try:
                        iam.delete_access_key(access_key, user_name=name)
                    except boto.exception.BotoServerError as err:
                        module.fail_json(changed=False, msg=str(err))
                    else:
                        changed = True
    # Report the final key -> status mapping after all mutations.
    try:
        final_keys, final_key_status = \
            [ck['access_key_id'] for ck in
             iam.get_all_access_keys(name).
             list_access_keys_result.
             access_key_metadata],\
            [ck['status'] for ck in
             iam.get_all_access_keys(name).
             list_access_keys_result.
             access_key_metadata]
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    for fk, fks in zip(final_keys, final_key_status):
        updated_key_list.update({fk: fks})
    return name_change, updated_key_list, changed, new_keys
def set_users_groups(module, iam, name, groups, updated=None,
                     new_name=None):
    """Reconcile a user's group memberships with the desired list.

    Groups not present in ``groups`` are removed from the user, groups the
    user already belongs to are kept, and missing ones are added.
    """
    changed = False
    if updated:
        name = new_name
    try:
        response = iam.get_groups_for_user(name)
        current_groups = [g['group_name'] for g in response.list_groups_for_user_result.groups]
        remove_groups = list(frozenset(current_groups).difference(groups))
        new_groups = list(frozenset(groups).difference(current_groups))
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        if current_groups:
            # Existing memberships: apply only the delta.
            for group_name in new_groups:
                iam.add_user_to_group(group_name, name)
            for group_name in remove_groups:
                iam.remove_user_from_group(group_name, name)
        else:
            # No memberships yet: add every requested group, reporting a
            # clear error when a group does not exist.
            for group in groups:
                try:
                    iam.add_user_to_group(group, name)
                except boto.exception.BotoServerError as err:
                    error_msg = boto_exception(err)
                    if ('The group with name %s cannot be found.' % group) in error_msg:
                        module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
    changed = bool(remove_groups) or bool(new_groups)
    return (groups, changed)
def create_group(module=None, iam=None, name=None, path=None):
    """Create the IAM group *name* under *path*.

    Returns (name, changed).  On a boto error the module fails via
    module.fail_json (which terminates the module process).
    """
    succeeded = False
    try:
        response = iam.create_group(name, path)
        # Touch the nested result attributes to surface malformed responses,
        # mirroring the original chained access; the value itself is unused.
        response.create_group_response.create_group_result.group
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=succeeded, msg=str(err))
    else:
        succeeded = True
    return name, succeeded
def delete_group(module=None, iam=None, name=None):
    """Delete the IAM group *name*, removing its inline policies first if needed.

    Returns (changed, name).  Unrecoverable errors end in module.fail_json,
    which terminates the module process.
    """
    changed = False
    try:
        iam.delete_group(name)
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        if ('must delete policies first') in error_msg:
            # Inline policies are still attached: delete each one, then retry
            # the group deletion once.
            for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
                iam.delete_group_policy(name, policy)
            try:
                iam.delete_group(name)
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if ('must delete policies first') in error_msg:
                    # Inline policies are gone but deletion still fails --
                    # most likely managed policies, which boto2 cannot detach.
                    module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
                                                         "that %s has Managed Polices. This is not "
                                                         "currently supported by boto. Please detach the polices "
                                                         "through the console and try again." % name)
                else:
                    module.fail_json(changed=changed, msg=str(error_msg))
            else:
                changed = True
        else:
            module.fail_json(changed=changed, msg=str(error_msg))
    else:
        changed = True
    return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
    """Move and/or rename an existing IAM group.

    Returns (changed, name, new_path, current_group_path); *name* reflects the
    post-rename name.  Boto errors end in module.fail_json.
    """
    changed = False
    try:
        group_info = iam.get_group(name)
        current_group_path = group_info.get_group_response.get_group_result.group['path']
        # Move the group only when the requested path actually differs.
        if new_path and current_group_path != new_path:
            iam.update_group(name, new_path=new_path)
            changed = True
        # Rename only when the requested name actually differs.
        if new_name and name != new_name:
            iam.update_group(name, new_group_name=new_name, new_path=new_path)
            changed = True
            name = new_name
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
    """Create IAM role *name* (plus a same-named instance profile) if absent.

    role_list / prof_list are the names of roles / instance profiles that
    already exist; trust_policy_doc is a JSON string used as the assume-role
    policy document.  Returns (changed, updated_role_list, iam_role_result,
    instance_profile_result).
    """
    changed = False
    iam_role_result = None
    instance_profile_result = None
    try:
        if name not in role_list:
            changed = True
            iam_role_result = iam.create_role(name,
                                              assume_role_policy_document=trust_policy_doc,
                                              path=path).create_role_response.create_role_result.role

            if name not in prof_list:
                # Also create a matching instance profile and attach the new
                # role to it, mirroring AWS console behaviour.
                instance_profile_result = iam.create_instance_profile(name, path=path) \
                    .create_instance_profile_response.create_instance_profile_result.instance_profile
                iam.add_role_to_instance_profile(name, name)
        else:
            # Role already exists: just fetch the existing instance profile.
            instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        # Refresh the role list and re-read the role so callers see the
        # post-creation state.
        updated_role_list = list_all_roles(iam)
        iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
    return changed, updated_role_list, iam_role_result, instance_profile_result
def delete_role(module, iam, name, role_list, prof_list):
    """Delete IAM role *name* and its same-named instance profile, if present.

    Detaches instance profiles first, retries after removing inline role
    policies when necessary.  Returns (changed, updated_role_list,
    iam_role_result, instance_profile_result).
    """
    changed = False
    iam_role_result = None
    instance_profile_result = None
    try:
        if name in role_list:
            # A role cannot be deleted while attached to instance profiles.
            cur_ins_prof = [rp['instance_profile_name'] for rp in
                            iam.list_instance_profiles_for_role(name).
                            list_instance_profiles_for_role_result.
                            instance_profiles]
            for profile in cur_ins_prof:
                iam.remove_role_from_instance_profile(profile, name)
            try:
                iam.delete_role(name)
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if ('must detach all policies first') in error_msg:
                    # Inline role policies are still attached: delete each
                    # one, then retry the role deletion once.
                    for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
                        iam.delete_role_policy(name, policy)
                    try:
                        iam_role_result = iam.delete_role(name)
                    except boto.exception.BotoServerError as err:
                        error_msg = boto_exception(err)
                        if ('must detach all policies first') in error_msg:
                            # Inline policies are gone but deletion still
                            # fails -- most likely managed policies, which
                            # boto2 cannot detach.
                            module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
                                                                 "that %s has Managed Polices. This is not "
                                                                 "currently supported by boto. Please detach the polices "
                                                                 "through the console and try again." % name)
                        else:
                            module.fail_json(changed=changed, msg=str(err))
                    else:
                        changed = True
            else:
                changed = True
        # Remove the matching instance profile, if one exists.
        for prof in prof_list:
            if name == prof:
                instance_profile_result = iam.delete_instance_profile(name)
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err))
    else:
        updated_role_list = list_all_roles(iam)
    return changed, updated_role_list, iam_role_result, instance_profile_result
def main():
    """Entry point for the iam module.

    Parses module parameters, connects to AWS IAM via boto, and dispatches to
    the user/group/role create/update/delete helpers.  Every code path ends in
    ``module.exit_json`` or ``module.fail_json``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        iam_type=dict(
            default=None, required=True, choices=['user', 'group', 'role']),
        groups=dict(type='list', default=None, required=False),
        state=dict(
            default=None, required=True, choices=['present', 'absent', 'update']),
        password=dict(default=None, required=False, no_log=True),
        update_password=dict(default='always', required=False, choices=['always', 'on_create']),
        access_key_state=dict(default=None, required=False, choices=[
            'active', 'inactive', 'create', 'remove',
            'Active', 'Inactive', 'Create', 'Remove']),
        access_key_ids=dict(type='list', default=None, required=False),
        key_count=dict(type='int', default=1, required=False),
        name=dict(default=None, required=False),
        trust_policy_filepath=dict(default=None, required=False),
        trust_policy=dict(type='dict', default=None, required=False),
        new_name=dict(default=None, required=False),
        path=dict(default='/', required=False),
        new_path=dict(default=None, required=False)
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
    )

    if not HAS_BOTO:
        module.fail_json(msg='This module requires boto, please install it')

    state = module.params.get('state').lower()
    iam_type = module.params.get('iam_type').lower()
    groups = module.params.get('groups')
    name = module.params.get('name')
    new_name = module.params.get('new_name')
    password = module.params.get('password')
    update_pw = module.params.get('update_password')
    path = module.params.get('path')
    new_path = module.params.get('new_path')
    key_count = module.params.get('key_count')
    key_state = module.params.get('access_key_state')
    trust_policy = module.params.get('trust_policy')
    trust_policy_filepath = module.params.get('trust_policy_filepath')
    key_ids = module.params.get('access_key_ids')

    if key_state:
        key_state = key_state.lower()
        # 'active'/'inactive' toggle existing keys, so explicit key ids are
        # required for those states.
        if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
            module.fail_json(changed=False, msg="At least one access key has to be defined in order"
                                                " to use 'active' or 'inactive'")

    if iam_type == 'user' and module.params.get('password') is not None:
        pwd = module.params.get('password')
    elif iam_type != 'user' and module.params.get('password') is not None:
        module.fail_json(msg="a password is being specified when the iam_type "
                             "is not user. Check parameters")
    else:
        pwd = None

    # BUGFIX: this check previously read the non-existent parameter
    # 'access_key_id' (the declared name is 'access_key_ids'), so supplying
    # key ids for a non-user iam_type was never rejected.
    if iam_type != 'user' and (module.params.get('access_key_state') is not None or
                               module.params.get('access_key_ids') is not None):
        module.fail_json(msg="the IAM type must be user, when IAM access keys "
                             "are being modified. Check parameters")

    if iam_type == 'role' and state == 'update':
        module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
                                            "please specify present or absent")

    # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
    if trust_policy_filepath:
        try:
            with open(trust_policy_filepath, 'r') as json_data:
                trust_policy_doc = json.dumps(json.load(json_data))
        except Exception as e:
            module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
    elif trust_policy:
        try:
            trust_policy_doc = json.dumps(trust_policy)
        except Exception as e:
            module.fail_json(msg=str(e) + ': ' + trust_policy)
    else:
        trust_policy_doc = None

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    changed = False

    # Snapshot the existing entities so the dispatch logic below can tell
    # create from update/rename situations.
    try:
        orig_group_list = list_all_groups(iam)

        orig_user_list = list_all_users(iam)

        orig_role_list = list_all_roles(iam)

        orig_prof_list = list_all_instance_profiles(iam)
    except boto.exception.BotoServerError as err:
        module.fail_json(msg=err.message)

    if iam_type == 'user':
        been_updated = False
        user_groups = None
        user_exists = any([n in [name, new_name] for n in orig_user_list])
        if user_exists:
            current_path = iam.get_user(name).get_user_result.user['path']
            if not new_path and current_path != path:
                new_path = path
                path = current_path

        if state == 'present' and not user_exists and not new_name:
            (meta, changed) = create_user(
                module, iam, name, password, path, key_state, key_count)
            keys = iam.get_all_access_keys(name).list_access_keys_result.\
                access_key_metadata
            if groups:
                (user_groups, changed) = set_users_groups(
                    module, iam, name, groups, been_updated, new_name)
            module.exit_json(
                user_meta=meta, groups=user_groups, keys=keys, changed=changed)

        elif state in ['present', 'update'] and user_exists:
            if update_pw == 'on_create':
                password = None
            if name not in orig_user_list and new_name in orig_user_list:
                been_updated = True
            name_change, key_list, user_changed, new_key = update_user(
                module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
            # Merge freshly created keys with the surviving pre-existing ones.
            if new_key:
                user_meta = {'access_keys': list(new_key)}
                user_meta['access_keys'].extend(
                    [{'access_key_id': key, 'status': value} for key, value in key_list.items() if
                     key not in [it['access_key_id'] for it in new_key]])
            else:
                user_meta = {
                    'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}

            # BUGFIX: bind the pre-rename name unconditionally.  Several exit
            # paths below reference orig_name whenever new_name is set; it was
            # previously only bound when a rename actually happened, raising
            # NameError otherwise.
            orig_name = name
            if name_change and new_name:
                name = new_name
            if isinstance(groups, list):
                user_groups, groups_changed = set_users_groups(
                    module, iam, name, groups, been_updated, new_name)
                if groups_changed == user_changed:
                    changed = groups_changed
                else:
                    changed = True
            else:
                changed = user_changed
            if new_name and new_path:
                module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
                                 new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
                                 created_keys=new_key, user_meta=user_meta)
            elif new_name and not new_path and not been_updated:
                module.exit_json(
                    changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
                    created_keys=new_key, user_meta=user_meta)
            elif new_name and not new_path and been_updated:
                module.exit_json(
                    changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state,
                    created_keys=new_key, user_meta=user_meta)
            elif not new_name and new_path:
                module.exit_json(
                    changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path,
                    keys=key_list, created_keys=new_key, user_meta=user_meta)
            else:
                module.exit_json(
                    changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key,
                    user_meta=user_meta)

        elif state == 'update' and not user_exists:
            module.fail_json(
                msg="The user %s does not exist. No update made." % name)

        elif state == 'absent':
            if user_exists:
                try:
                    # Purge group memberships first, then the user itself.
                    set_users_groups(module, iam, name, '')
                    name, changed = delete_user(module, iam, name)
                    module.exit_json(deleted_user=name, changed=changed)
                except Exception as ex:
                    module.fail_json(changed=changed, msg=str(ex))
            else:
                module.exit_json(
                    changed=False, msg="User %s is already absent from your AWS IAM users" % name)

    elif iam_type == 'group':
        group_exists = name in orig_group_list

        if state == 'present' and not group_exists:
            new_group, changed = create_group(module=module, iam=iam, name=name, path=path)
            module.exit_json(changed=changed, group_name=new_group)
        elif state in ['present', 'update'] and group_exists:
            changed, updated_name, updated_path, cur_path = update_group(
                module=module, iam=iam, name=name, new_name=new_name,
                new_path=new_path)

            if new_path and new_name:
                module.exit_json(changed=changed, old_group_name=name,
                                 new_group_name=updated_name, old_path=cur_path,
                                 new_group_path=updated_path)

            if new_path and not new_name:
                module.exit_json(changed=changed, group_name=name,
                                 old_path=cur_path,
                                 new_group_path=updated_path)

            if not new_path and new_name:
                module.exit_json(changed=changed, old_group_name=name,
                                 new_group_name=updated_name, group_path=cur_path)

            if not new_path and not new_name:
                module.exit_json(
                    changed=changed, group_name=name, group_path=cur_path)
        elif state == 'update' and not group_exists:
            module.fail_json(
                changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
        elif state == 'absent':
            if name in orig_group_list:
                removed_group, changed = delete_group(module=module, iam=iam, name=name)
                module.exit_json(changed=changed, delete_group=removed_group)
            else:
                module.exit_json(changed=changed, msg="Group already absent")

    elif iam_type == 'role':
        role_list = []
        if state == 'present':
            changed, role_list, role_result, instance_profile_result = create_role(
                module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc)
        elif state == 'absent':
            changed, role_list, role_result, instance_profile_result = delete_role(
                module, iam, name, orig_role_list, orig_prof_list)
        elif state == 'update':
            module.fail_json(
                changed=False, msg='Role update not currently supported by boto.')
        module.exit_json(changed=changed, roles=role_list, role_result=role_result,
                         instance_profile_result=instance_profile_result)
# Standard module entry point: run only when executed directly by Ansible.
if __name__ == '__main__':
    main()
| {
"content_hash": "599816caf42a6f9fd1637ed6e7d7c7f2",
"timestamp": "",
"source": "github",
"line_count": 851,
"max_line_length": 158,
"avg_line_length": 41.37602820211516,
"alnum_prop": 0.5789952003635228,
"repo_name": "SergeyCherepanov/ansible",
"id": "33ae98485495fddc09c790c4184fee8f526431d2",
"size": "35352",
"binary": false,
"copies": "32",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/amazon/iam.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""Minimal setup script to appease buildout for Melange.
"""
import os
import re
from setuptools import setup, find_packages
match_version = re.compile("version: ([0-9\-]+)")
try:
appyaml = open(os.path.join("app", "app.yaml.template"))
version = match_version.findall(appyaml.read())[0]
except:
version = "UNKNOWN"
setup(
name = 'melange',
description=("The goal of this project is to create a framework for "
"representing Open Source contribution workflows, such as"
" the existing Google Summer of Code TM (GSoC) program."),
version = version,
package_dir = {'':'src'},
packages=find_packages('src'),
author=open("AUTHORS").read(),
url='http://code.google.com/p/soc',
license='Apache2',
install_requires = [
'PyYAML',
'WebOb',
'zope.testbrowser',
'pylint',
'nose',
'Django==1.1.0',
'fixture',
],
tests_require=[
],
entry_points = {'console_scripts': ['run-tests = tests.run:main',
'gen-app-yaml = scripts.gen_app_yaml:main',
],
},
include_package_data = True,
zip_safe = False,
)
| {
"content_hash": "3bc3114d58e2452d4457ae27adbd5f16",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 83,
"avg_line_length": 29.325581395348838,
"alnum_prop": 0.5448057097541633,
"repo_name": "MatthewWilkes/mw4068-packaging",
"id": "b40e7cfe9b1b93895e1c58349faa7be27e0af84e",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/melange/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68827"
},
{
"name": "HTML",
"bytes": "586705"
},
{
"name": "JavaScript",
"bytes": "441502"
},
{
"name": "Python",
"bytes": "2136551"
},
{
"name": "Shell",
"bytes": "5667"
}
],
"symlink_target": ""
} |
"""Tabs API Tests for Version 1.0.
This is a testing template for the generated TabsAPI Class.
"""
import unittest
import requests
import secrets
from pycanvas.apis.tabs import TabsAPI
from pycanvas.apis.tabs import Tab
class TestTabsAPI(unittest.TestCase):
    """Integration tests for the generated TabsAPI class."""

    def setUp(self):
        """Create a TabsAPI client pointed at the configured Canvas instance."""
        self.client = TabsAPI(secrets.instance_address, secrets.access_token)

    def test_list_available_tabs_for_course_or_group_courses(self):
        """Exercise TabsAPI.list_available_tabs_for_course_or_group_courses."""
        course_id = None  # Change me!!
        response = self.client.list_available_tabs_for_course_or_group_courses(course_id, include=None)

    def test_list_available_tabs_for_course_or_group_groups(self):
        """Exercise TabsAPI.list_available_tabs_for_course_or_group_groups."""
        group_id = None  # Change me!!
        response = self.client.list_available_tabs_for_course_or_group_groups(group_id, include=None)

    def test_update_tab_for_course(self):
        """Placeholder for TabsAPI.update_tab_for_course.

        Deliberately not exercised: the endpoint issues a PUT and would
        mutate the Canvas instance, which needs consideration first.
        """
        pass
| {
"content_hash": "a794bf01741f5f869342220b5c9b566a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 125,
"avg_line_length": 38.3235294117647,
"alnum_prop": 0.6914811972371451,
"repo_name": "PGower/PyCanvas",
"id": "f38dd89c054f1e92eab425278a678796bc4e43da",
"size": "1303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycanvas/tests/tabs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1877053"
}
],
"symlink_target": ""
} |
"""Minimal demo client: fetch the 2010 data set from the local rainfall API."""
import requests

# A timeout prevents the demo from hanging forever if the server is down.
response = requests.get('http://localhost:5000/rainfall-api/v1/years/2010/data',
                        timeout=10)
# The original parsed the JSON body and discarded the result; print it so the
# demo actually shows output when run as a script.
print(response.json())
| {
"content_hash": "756dbdd55576182aa9558347f79465ad",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 80,
"avg_line_length": 16.714285714285715,
"alnum_prop": 0.7521367521367521,
"repo_name": "alysbrett/rainfall-api",
"id": "3564b6de5bd265375f0008441d30fc2a96055328",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5557"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from unidecode import unidecode
from tagging.fields import TagField, Tag
class Categories(models.Model):
    """Blog category with ASCII (SEF) mirrors of title/description for SEO."""
    # Category title and its ASCII (search-engine-friendly) form
    title = models.CharField(max_length=255, verbose_name="Baslik")
    sef_title = models.CharField(max_length=255, blank=True, editable=False)
    # URL slug
    slug = models.SlugField(max_length=255, verbose_name="Slug")
    # Text emitted in the meta description tag -- important for SEO
    description = models.CharField(max_length=255, verbose_name="Aciklama")
    sef_description = models.CharField(max_length=255, blank=True, editable=False)

    def __unicode__(self):
        return self.title

    class Meta:
        verbose_name_plural = "Kategoriler"

    def get_absolute_url(self):
        return "/blog/kategori/%s/" % self.slug

    def save(self, *args, **kwargs):
        # Derive both ASCII fields first, then persist once.  The original
        # called super().save() after each assignment, issuing two database
        # writes (and firing save signals twice) per save().
        self.sef_title = unidecode(self.title)
        self.sef_description = unidecode(self.description)
        super(Categories, self).save(*args, **kwargs)
class Posts(models.Model):
    """Blog post with ASCII (SEF) mirrors of title/description for SEO."""
    # Post title and its ASCII (search-engine-friendly) form
    title = models.CharField(max_length=255, verbose_name="Baslik")
    sef_title = models.CharField(max_length=255, blank=True, editable=False)
    # URL slug
    slug = models.SlugField(max_length=255, verbose_name="Slug")
    # Text emitted in the meta description tag -- important for SEO
    description = models.CharField(max_length=255, verbose_name="Aciklama")
    sef_description = models.CharField(max_length=255, blank=True, editable=False)
    # Category selection
    categories = models.ManyToManyField(Categories, verbose_name="Kategoriler", blank=True)
    # Post body: home-page teaser and full content
    hom_content = models.TextField(verbose_name="Anasayfa Icerik")
    content = models.TextField(verbose_name="Icerik")
    # Publication flag
    isonline = models.BooleanField(verbose_name="Yazi yayinlansin mi?", default=False)
    # Tags (django-tagging)
    tags = TagField(verbose_name="Etiketler")
    # Creation/modification timestamps; not shown in the admin panel
    created = models.DateTimeField(auto_now_add=True, editable=False, verbose_name="Olusturulma")
    edited = models.DateTimeField(auto_now=True, editable=False, verbose_name="Degistirilme")
    # Author selection
    author = models.ForeignKey(User, verbose_name="Yazar")

    def __unicode__(self):
        return self.title

    class Meta:
        verbose_name_plural = "Yazilar"

    def get_absolute_url(self):
        return "/blog/%s/" % self.slug

    def save(self, *args, **kwargs):
        # Derive both ASCII fields first, then persist once.  The original
        # called super().save() after each assignment, issuing two database
        # writes (and firing save signals twice) per save().
        self.sef_title = unidecode(self.title)
        self.sef_description = unidecode(self.description)
        super(Posts, self).save(*args, **kwargs)

    def get_tags(self):
        # Return the Tag objects django-tagging has attached to this post.
        return Tag.objects.get_for_object(self)
| {
"content_hash": "0e2b034883f595f41cb6c5c0bcd16468",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 105,
"avg_line_length": 35.51724137931034,
"alnum_prop": 0.6679611650485436,
"repo_name": "halitalptekin/PyTr",
"id": "330bbe22ec81ca832191e77dba1d43c0fb7486c8",
"size": "3090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37833"
}
],
"symlink_target": ""
} |
import web2kindle.script.zhihu_collection
import web2kindle.script.zhihu_answers
import web2kindle.script.zhihu_zhuanlan
import web2kindle.script.qdaily
import web2kindle.script.guoke_scientific
import web2kindle.script.jianshu_wenji
import web2kindle.script.jianshu_user
import web2kindle.script.jianshu_zhuanti
# Manual smoke tests: each call runs one crawler script end to end.
if __name__ == '__main__':
    web2kindle.script.zhihu_collection.main(['207961272'], 1, float('inf'),
                                            {'img': False, 'gif': False, 'email': False})
    web2kindle.script.zhihu_answers.main(['zhong-wen-sen'], 1, 20, {'img': True, 'gif': False})
    web2kindle.script.zhihu_zhuanlan.main(['PatrickZhang'], 0, 20, {'img': False, 'gif': False, 'email': False})
    web2kindle.script.guoke_scientific.main(20, 30, {'img': True, 'gif': False})
    web2kindle.script.qdaily.main('2017-12-21', '2017-12-21',
                                  {'img': True, 'gif': False, 'type': 'home', 'email': False})
    # NOTE(review): no `zhihu_daily` module is imported above and
    # `zhihu_daily_main` is not an attribute created by any of these imports,
    # so this call raises AttributeError when reached -- confirm the intended
    # target (presumably web2kindle.script.zhihu_daily.main) and fix.
    web2kindle.script.zhihu_daily_main()
    web2kindle.script.jianshu_wenji.main(['4431344'], 1, 2, {'img': True, 'gif': False, 'email': False})
    web2kindle.script.jianshu_user.main(['4d70b2bba306'], 1, 2, {'img': True, 'gif': False, 'email': False})
    web2kindle.script.jianshu_zhuanti.main(['1a54c5910458'], 1, 2, {'img': True, 'gif': False, 'email': False})
| {
"content_hash": "ffb523a6f436dcce43df927b286c0db9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 112,
"avg_line_length": 53,
"alnum_prop": 0.6535849056603774,
"repo_name": "wax8280/web2kindle",
"id": "ebbba23cd7ae27b6d4d8611a088ed11f8a6472ba",
"size": "1534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/main_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16334"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "170680"
}
],
"symlink_target": ""
} |
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param head: The first node of linked list.
    @return: a tree node (root of a height-balanced BST)
    """

    def getLength(self, head):
        """Return the number of nodes in the linked list starting at *head*."""
        count = 0
        while head:
            head = head.next
            count += 1
        return count

    def sortedListToBST(self, head):
        """Convert a sorted singly linked list into a height-balanced BST.

        Uses an in-order build so the list is traversed exactly once
        (O(n) time, O(log n) recursion depth).

        BUGFIX vs the original: the recursive helper was called with its
        arguments swapped relative to its (size, head) signature, the
        size == 1 case returned the ListNode itself instead of a TreeNode,
        and `head = head.next` inside the helper never propagated back to
        the caller, so the tree was built from the wrong nodes.
        """
        if head is None:
            return None
        # _current tracks the next unconsumed list node across the in-order
        # recursion (advancing a local variable inside the helper cannot
        # propagate to the caller).
        self._current = head
        return self.sortedListToBSTHelper(self.getLength(head))

    def sortedListToBSTHelper(self, size, head=None):
        """Build a balanced BST from the next *size* nodes of self._current.

        *head* is retained for backward signature compatibility; it is unused.
        """
        if size <= 0:
            return None
        # Consume the left half first: in-order construction preserves the
        # list's sorted order in the resulting BST.
        left = self.sortedListToBSTHelper(size // 2)
        root = TreeNode(self._current.val)
        self._current = self._current.next
        root.left = left
        root.right = self.sortedListToBSTHelper(size - size // 2 - 1)
        return root
| {
"content_hash": "f6f94b2f569dfc8a0e5d4d47dc2f0a4c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 71,
"avg_line_length": 25.06382978723404,
"alnum_prop": 0.5441426146010186,
"repo_name": "quake0day/oj",
"id": "dd92ed0fedd07bfc7b37e010c827620e8a2c53eb",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Convert Sorted List to Balanced BST.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5897"
},
{
"name": "Java",
"bytes": "691"
},
{
"name": "JavaScript",
"bytes": "1131"
},
{
"name": "Python",
"bytes": "294929"
}
],
"symlink_target": ""
} |
import pytest
from datadog_checks.etcd import Etcd
from .common import REMAPED_DEBUGGING_METRICS, STORE_METRICS, URL
from .utils import is_leader, legacy, preview
pytestmark = pytest.mark.e2e
@preview
def test_new(dd_agent_check, instance, openmetrics_metrics):
    """E2E coverage for the OpenMetrics-based (preview) implementation."""
    aggregator = dd_agent_check(instance, rate=True)

    leader_tags = ['is_leader:true' if is_leader(URL) else 'is_leader:false']

    for metric_name in openmetrics_metrics:
        aggregator.assert_metric('etcd.{}'.format(metric_name), tags=leader_tags, at_least=0)

    for metric_name in REMAPED_DEBUGGING_METRICS:
        aggregator.assert_metric('etcd.{}'.format(metric_name), at_least=1)

    aggregator.assert_all_metrics_covered()

    endpoint_tags = ['endpoint:{}'.format(instance['prometheus_url'])]
    aggregator.assert_service_check('etcd.prometheus.health', Etcd.OK, tags=endpoint_tags, count=2)
@legacy
def test_legacy(dd_agent_check, instance):
    """E2E coverage for the legacy etcd check implementation."""
    aggregator = dd_agent_check(instance, rate=True)

    common_tags = ['url:{}'.format(URL),
                   'etcd_state:{}'.format('leader' if is_leader(URL) else 'follower')]

    for store_metric in STORE_METRICS:
        aggregator.assert_metric('etcd.store.{}'.format(store_metric), tags=common_tags, at_least=1)

    for direction in ('send', 'recv'):
        aggregator.assert_metric('etcd.self.{}.appendrequest.count'.format(direction),
                                 tags=common_tags, at_least=1)

    service_check_tags = ['url:{}'.format(URL),
                          'etcd_state:{}'.format('leader' if is_leader(URL) else 'follower')]
    aggregator.assert_service_check(Etcd.SERVICE_CHECK_NAME, tags=service_check_tags, count=2)
    aggregator.assert_service_check(Etcd.HEALTH_SERVICE_CHECK_NAME, tags=service_check_tags[:1], count=2)
| {
"content_hash": "2da6f9c87c547ab2778416828b4d5b66",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 115,
"avg_line_length": 37.577777777777776,
"alnum_prop": 0.7120047309284447,
"repo_name": "DataDog/integrations-core",
"id": "239c77f362b11886b9010d997d006b663efb5073",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etcd/tests/test_e2e.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
"""
Example demonstrating how to fit a complex H-alpha profile after subtracting off a satellite line
(in this case, He I 6678.151704)
"""
import pyspeckit
sp = pyspeckit.OpticalSpectrum('sn2009ip_halpha.fits')
# start by plotting a small region around the H-alpha line
sp.plotter(xmin=6100,xmax=7000,ymax=2.23,ymin=0)
# the baseline (continuum) fit will be 2nd order, and excludes "bad"
# parts of the spectrum
# The exclusion zone was selected interatively (i.e., cursor hovering over the spectrum)
sp.baseline(xmin=6100, xmax=7000,
exclude=[6450,6746,6815,6884,7003,7126,7506,7674,8142,8231],
subtract=False, reset_selection=True, highlight_fitregion=True,
order=2)
# Fit a 4-parameter voigt (figured out through a series if guess and check fits)
sp.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1,
0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1,
0.11957267912208754, 6678.3853431367716, 4.1892742162283181, 1,
0.10506431180136294, 6589.9310414408683, 72.378997529374672, 1,],
fittype='voigt')
# Now overplot the fitted components with an offset so we can see them
# the add_baseline=True bit means that each component will be displayed with the "Continuum" added
# If this was off, the components would be displayed at y=0
# the component_yoffset is the offset to add to the continuum for plotting only (a constant)
sp.specfit.plot_components(add_baseline=True,component_yoffset=-0.2)
# Now overplot the residuals on the same graph by specifying which axis to overplot it on
# clear=False is needed to keep the original fitted plot drawn
# yoffset is the offset from y=zero
sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=0.20,label=False)
# save the figure
sp.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_zoom.png")
# print the fit results in table form
# This includes getting the equivalent width for each component using sp.specfit.EQW
print " ".join(["%15s %15s" % (s,s+"err") for s in sp.specfit.parinfo.parnames])," ".join(["%15s" % ("EQW"+str(i)) for i,w in enumerate(sp.specfit.EQW(components=True))])
print " ".join(["%15g %15g" % (par.value,par.error) for par in sp.specfit.parinfo])," ".join(["%15g" % w for w in sp.specfit.EQW(components=True)])
# here are some other fitted parameters that can be printed:
print "Fitted EQW:", sp.specfit.EQW()
print "Direct EQW:", sp.specfit.EQW(fitted=False)
print "Approximate FWHM:", sp.specfit.measure_approximate_fwhm()
print "Approximate FWHM (with interpolation):", sp.specfit.measure_approximate_fwhm(interpolate_factor=10)
# zoom in further for a detailed view of the profile fit
sp.plotter.axis.set_xlim(6562-150,6562+150)
sp.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_zoomzoom.png")
# now we'll re-do the fit with the He I line subtracted off
# first, create a copy of the spectrum
just_halpha = sp.copy()
# Second, subtract off the model fit for the He I component
# (identify it by looking at the fitted central wavelengths)
just_halpha.data -= sp.specfit.modelcomponents[2,:]
# re-plot
just_halpha.plotter(xmin=6100,xmax=7000,ymax=2.00,ymin=-0.3)
# this time, subtract off the baseline - we're now confident that the continuum
# fit is good enough
just_halpha.baseline(xmin=6100, xmax=7000,
                     exclude=[6450,6746,6815,6884,7003,7126,7506,7674,8142,8231],
                     subtract=True, reset_selection=True, highlight_fitregion=True, order=2)
# Do a 3-component fit now that the Helium line is gone
# I've added some limits here because I know what parameters I expect of my fitted line
# NOTE(review): each Voigt component appears to take four parameters
# (amplitude, center, and two width terms) - hence the groups of four
# below and the "*3" on the limit lists; confirm against pyspeckit docs.
just_halpha.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1,
                             0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1,
                             0.10506431180136294, 6589.9310414408683, 50.378997529374672, 1,],
                    fittype='voigt',
                    xmin=6100,xmax=7000,
                    limitedmax=[False,False,True,True]*3,
                    limitedmin=[True,False,True,True]*3,
                    limits=[(0,0),(0,0),(0,100),(0,100)]*3)
# overplot the components and residuals again
just_halpha.specfit.plot_components(add_baseline=False,component_yoffset=-0.1)
just_halpha.specfit.plotresiduals(axis=just_halpha.plotter.axis,clear=False,yoffset=-0.20,label=False)
# The "optimal chi^2" isn't a real statistical concept, it's something I made up
# However, I think it makes sense (but post an issue if you disagree!):
# It uses the fitted model to find all pixels that are above the noise in the spectrum
# then computes chi^2/n using only those pixels
just_halpha.specfit.annotate(chi2='optimal')
# save the figure
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_threecomp.png")
# A new zoom-in figure
import pylab
# now hide the legend
just_halpha.specfit.fitleg.set_visible(False)
# overplot a y=0 line through the residuals (for reference)
# (the residuals were plotted with yoffset=-0.2, so their zero level is -0.2)
pylab.plot([6100,7000],[-0.2,-0.2],'y--')
# zoom vertically
pylab.gca().set_ylim(-0.3,0.3)
# redraw & save
pylab.draw()
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_threecomp_zoom.png")
# Part of the reason for doing the above work is to demonstrate that a
# 3-component fit is better than a 2-component fit
#
# So, now we do the same as above with a 2-component fit
just_halpha.plotter(xmin=6100,xmax=7000,ymax=2.00,ymin=-0.3)
just_halpha.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1,
                             0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1],
                    fittype='voigt')
just_halpha.specfit.plot_components(add_baseline=False,component_yoffset=-0.1)
just_halpha.specfit.plotresiduals(axis=just_halpha.plotter.axis,clear=False,yoffset=-0.20,label=False)
just_halpha.specfit.annotate(chi2='optimal')
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_twocomp.png")
just_halpha.specfit.fitleg.set_visible(False)
pylab.plot([6100,7000],[-0.2,-0.2],'y--')
pylab.gca().set_ylim(-0.3,0.3)
pylab.draw()
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_twocomp_zoom.png")
| {
"content_hash": "de8c5a1cf7a001fe4f69e3c541ab77b8",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 170,
"avg_line_length": 47.775193798449614,
"alnum_prop": 0.7275677429823139,
"repo_name": "keflavich/pyspeckit-obsolete",
"id": "f6b051222c40e2f25ade5a03bef15c4d9f78d2e5",
"size": "6163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sn_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3626"
},
{
"name": "Python",
"bytes": "1133508"
},
{
"name": "TeX",
"bytes": "1170"
}
],
"symlink_target": ""
} |
from os.path import join as pjoin
import logging
from collections import defaultdict
log = logging.getLogger("main")
from ete2.tools.phylobuild_lib.task import Msf
from ete2.tools.phylobuild_lib.master_task import ConcatAlgTask
from ete2.tools.phylobuild_lib.master_job import Job
from ete2.tools.phylobuild_lib.utils import SeqGroup, GLOBALS, generate_runid, strip, pexist, md5
from ete2.tools.phylobuild_lib import db
from ete2.tools.phylobuild_lib.errors import TaskError
__all__ = ["ConcatAlg"]
class ConcatAlg(ConcatAlgTask):
    """Task that builds a concatenated (supermatrix) alignment.

    One gene-tree workflow (a Msf job) is spawned per COG. Once those
    workflows finish, finish() collects the per-COG alignments and
    concatenates them into a single supermatrix, one model partition
    per evolutionary model.
    """

    def __init__(self, cogs, seqtype, conf, confname, workflow_checksum):
        self.confname = confname
        self.conf = conf
        #self.cogs_hard_limit = int(conf[confname]["_max_cogs"])
        #used_cogs = cogs[:self.cogs_hard_limit]
        used_cogs = cogs
        # The node id of this task is the md5 of the sorted COG content,
        # so an identical set of COGs always maps to the same task id.
        cog_string = '#'.join([','.join(sorted(c)) for c in used_cogs])
        cog_keyid = md5(cog_string) # This will be nodeid
        base_args = {}
        ConcatAlgTask.__init__(self, cog_keyid, "concat_alg", "ConcatAlg",
                               workflow_checksum=workflow_checksum,
                               base_args=base_args, extra_args=conf[confname])
        self.avail_cogs = len(cogs)
        self.used_cogs = len(used_cogs)
        self.cogs = used_cogs
        self.seqtype = seqtype
        self.cog_ids = set()
        self.job2alg = {}
        self.job2model = {}
        if seqtype == "aa":
            self.default_model = conf[confname]["_default_aa_model"]
        elif seqtype == "nt":
            self.default_model = conf[confname]["_default_nt_model"]
        # Strip the leading marker character from the configured workflow name.
        self.genetree_workflow = conf[confname]["_workflow"][1:]
        self.init()

    def load_jobs(self):
        """Register one Msf job (gene-tree reconstruction thread) per COG."""
        # I want a single phylogenetic tree for each cog
        from ete2.tools.phylobuild_lib.workflow.genetree import pipeline
        for co in self.cogs:
            # Register a new msf task for each COG, using the same
            # config file but opening a new tree reconstruction
            # thread.
            job = Msf(set(co), set(), seqtype = self.seqtype)
            job.main_tree = None
            job.threadid = generate_runid()
            job.configid = self.conf["_configid"]
            # This converts the job in a workflow job. As soon as a
            # task is done, it will be automatically processed and the
            # new tasks will be registered as new jobs.
            job.task_processor = pipeline
            job.target_wkname = self.genetree_workflow
            self.jobs.append(job)
            self.cog_ids.add(job.nodeid)

    def finish(self):
        """Collect per-COG alignments and store the concatenated matrix.

        Raises:
            TaskError: if any COG failed to produce an alignment.
        """
        # Assumes tasks resulting from genetree workflow, in which
        # only Alg and Acleaner tasks could contain the results
        log.log(26, "Collecting supermatrix data")
        jobtypes = set()
        job2alg, job2acleaner = {}, {}
        for job in self.jobs:
            jobtypes.add(job.ttype)
            if job.ttype == "alg" and job.nodeid not in self.job2alg:
                dataid = db.get_dataid(*job.alg_fasta_file.split("."))
                job2alg[job.nodeid] = db.get_data(dataid)
            elif job.ttype == "acleaner":
                a, b = job.clean_alg_fasta_file.split(".")
                dataid = db.get_dataid(*job.clean_alg_fasta_file.split("."))
                job2acleaner[job.nodeid] = db.get_data(dataid)
            elif job.ttype == "mchooser":
                self.job2model[job.nodeid] = job.best_model
        # Prefer trimmed (acleaner) alignments whenever any are available.
        if "acleaner" in jobtypes:
            self.job2alg = job2acleaner
        else:
            self.job2alg = job2alg
        if self.cog_ids - set(self.job2alg):
            missing = self.cog_ids - set(self.job2alg)
            log.error("Missing %s algs", len(missing))
            # BUGFIX: sets do not support slicing, so `missing[:10]` raised
            # TypeError and masked the intended TaskError message; convert
            # to a list before taking the first ten ids.
            raise TaskError(self, "Missing algs (%d): i.e. %s" %
                            (len(missing), list(missing)[:10]))
        alg_data = [(self.job2alg[nid],
                     self.job2model.get(nid, self.default_model))
                    for nid in self.job2alg]
        filenames, models = zip(*alg_data)
        mainalg, partitions, sp2alg, species, alg_lenghts = get_concatenated_alg(
            filenames,
            models, sp_field=0,
            sp_delimiter=GLOBALS["spname_delimiter"])
        log.log(20, "Done concat alg, now writting fasta format")
        fasta = mainalg.write(format="fasta")
        log.log(20, "Done concat alg, now writting phylip format")
        phylip = mainalg.write(format="iphylip_relaxed")
        txt_partitions = '\n'.join(partitions)
        log.log(26, "Modeled regions: \n"+'\n'.join(partitions))
        ConcatAlg.store_data(self, fasta, phylip, txt_partitions)
def get_species_code(name, splitter, field):
    """Extract the species code from a sequence name.

    By default, taxid is the first part of the seqid, separated by
    underscore (splitter="_", field=0).

    :param name: full sequence id, e.g. "9606_P53".
    :param splitter: delimiter separating the species code from the rest.
    :param field: which side of the (single) split to return (0 or 1).
    :return: the stripped species code.
    """
    # list() keeps the result indexable under Python 3, where map()
    # returns a lazy iterator; under Python 2 it is a harmless copy.
    return list(map(strip, name.split(splitter, 1)))[field]
def get_concatenated_alg(alg_filenames, models=None,
                        sp_field=0, sp_delimiter="_",
                        kill_thr=0.0,
                        keep_species=set()):
    """Concatenate several fasta alignments into one supermatrix.

    Sequences are matched across alignments by species code (extracted
    from each seq name with sp_delimiter/sp_field). Species missing
    from an alignment are padded with gaps ("-"). Alignments are
    grouped by evolutionary model into contiguous partitions.

    :param alg_filenames: fasta alignment files to concatenate.
    :param models: one model name per alignment (default: "None" each).
    :param sp_field: field index of the species code within seq names.
    :param sp_delimiter: delimiter separating species code from the rest.
    :param kill_thr: species present in fewer than this fraction of
        alignments are dropped (unless listed in keep_species).
    :param keep_species: species to keep regardless of kill_thr.
        NOTE(review): mutable default is safe here because it is only
        read, never mutated.
    :return: (concat SeqGroup, partition strings, sp2alg map,
        valid species list, per-alignment lengths).
    :raises ValueError: if models and alg_filenames differ in length.
    :raises Exception: on inconsistent alignments or a bad final size.

    NOTE(review): Python 2 only (iteritems/iterkeys/cmp and cmp-style
    sorted are used below).
    """
    # Concat alg container
    concat = SeqGroup()
    # Used to store different model partitions
    concat.id2partition = {}
    if not models:
        models = ["None"]*len(alg_filenames)
    else:
        if len(models) != len(alg_filenames):
            raise ValueError("Different number of algs and model names was found!")
    expected_total_length = 0
    # Check algs and gets the whole set of species
    alg_objects = []
    sp2alg = defaultdict(list)
    for algfile, matrix in zip(alg_filenames, models):
        alg = SeqGroup(algfile, "fasta")
        alg_objects.append(alg)
        lenseq = None
        browsed_species = set()
        alg.sp2seq = {}
        # Set best matrix for this alignment
        alg.matrix = matrix
        # Change seq names to contain only species names
        for i, seq in alg.id2seq.iteritems():
            name = db.get_seq_name(alg.id2name[i])
            taxid = get_species_code(name, splitter=sp_delimiter, field=sp_field)
            # All sequences within one alignment must have equal length.
            if lenseq is not None and len(seq) != lenseq:
                raise Exception("Inconsistent alignment when concatenating: Unequal length")
            elif lenseq is None:
                # First sequence of this alignment: record its length once.
                lenseq = len(seq)
                alg.seqlength = len(seq)
                expected_total_length += len(seq)
            if taxid in browsed_species:
                raise Exception("Inconsistent alignment when concatenating: Repeated species")
            browsed_species.add(taxid) # Check no duplicated species in the same alg
            sp2alg[taxid].append(alg) # Records all species seen in all algs.
            alg.sp2seq[taxid] = seq
    # Keep species that either are whitelisted or appear in a large
    # enough fraction of the alignments.
    valid_species = [sp for sp in sp2alg.iterkeys() \
                         if sp in keep_species or \
                         len(sp2alg[sp])/float(len(alg_objects)) > kill_thr]
    log.info("%d out of %d will be kept (missing factor threshold=%g, %d species forced to kept)" %\
                 (len(valid_species), len(sp2alg), kill_thr, len(keep_species)))
    def sort_single_algs(alg1, alg2):
        # Order alignments by model name first, then by the sorted set
        # of sequence names, so partitions come out deterministically.
        r = cmp(alg1.matrix, alg2.matrix)
        if r == 0:
            return cmp(sorted(alg1.id2name.values()),
                       sorted(alg2.id2name.values()))
        else:
            return r
    sorted_algs = sorted(alg_objects, sort_single_algs)
    concat_alg_lengths = [alg.seqlength for alg in sorted_algs]
    model2win = {}
    model2size = {}
    for alg in sorted_algs:
        model2size[alg.matrix] = model2size.get(alg.matrix, 0) + alg.seqlength
    # Create concat alg
    concat.id2seq = defaultdict(list)
    for sp in sorted(valid_species):
        log.log(20, "Concatenating sequences of [%s]" %sp)
        for alg in sorted_algs:
            # Pad with gaps when this species is absent from an alignment.
            seq = alg.sp2seq.get(sp, "-" * alg.seqlength)
            concat.id2seq[sp].append(seq)
            #current_seq = concat.id2seq.get(sp, "")
            #concat.id2seq[sp] = current_seq + seq.strip()
        concat.id2name[sp] = sp
        concat.name2id[sp] = sp
        concat.id2comment[sp] = [""]
        concat.id2seq[sp] = ''.join(concat.id2seq[sp])
    # Build RAxML-style partition descriptors, one per model, using
    # 1-based inclusive coordinates.
    current_pos = 0
    partitions = []
    for model in sorted(model2size.keys()):
        size = model2size[model]
        part = "%s, %s = %d-%d" % (model, model+"_genes", \
                                   current_pos + 1,\
                                   current_pos + size)
        current_pos += size
        partitions.append(part)
    # Basic Checks
    seq_sizes = [len(seq) for seq in concat.id2seq.values()]
    if len(set(seq_sizes)) != 1:
        raise Exception("Concatenated alignment is not consistent: unequal seq length ")
    if seq_sizes[0] != expected_total_length:
        raise Exception("The size of concatenated alg is not what expected")
    return concat, partitions, sp2alg, valid_species, concat_alg_lengths
| {
"content_hash": "12cbf61fd038ec390c4724d67254071b",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 100,
"avg_line_length": 41.89302325581395,
"alnum_prop": 0.5799933385144888,
"repo_name": "sauloal/cnidaria",
"id": "89e3f0348c2071873828720f6b6c55c4dfde7386",
"size": "10466",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/venv/lib/python2.7/site-packages/ete2/tools/phylobuild_lib/task/concat_alg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1696790"
},
{
"name": "C++",
"bytes": "3035466"
},
{
"name": "CSS",
"bytes": "20306"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "32478"
},
{
"name": "HTML",
"bytes": "19658"
},
{
"name": "JavaScript",
"bytes": "250616"
},
{
"name": "Jupyter Notebook",
"bytes": "8401292"
},
{
"name": "M4",
"bytes": "3905"
},
{
"name": "Makefile",
"bytes": "177650"
},
{
"name": "Objective-C",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "28122291"
},
{
"name": "R",
"bytes": "86108"
},
{
"name": "Shell",
"bytes": "676123"
}
],
"symlink_target": ""
} |
import json
import os
from celery import Celery
from .models import RecommendRoutesRequest, RecommendRoutesTask, RecommendRoutesTaskStatus
from .services import recommend_routes_service
# Celery application shared by the worker and the web process.
app = Celery(__name__)
app.conf.update(
    # Broker/backend default to a local Redis when the env vars are unset.
    broker_url=os.environ.get('CELERY_BROKER_URL', "redis://localhost:6379/0"),
    result_backend=os.environ.get('CELERY_RESULT_BACKEND', "redis://localhost:6379/0"),
    # https://medium.com/koko-networks/a-complete-guide-to-production-ready-celery-configuration-5777780b3166
    worker_send_task_event=False,
    # Hard kill tasks running longer than 15 minutes.
    task_time_limit=60 * 15,
    # Ack only after completion and fetch one task at a time, so long
    # jobs are redelivered on worker loss and spread fairly.
    task_acks_late=True,
    worker_prefetch_multiplier=1
)
@app.task
def recommend_routes_task(request_dict: dict) -> str:
    """Celery task: rebuild the request model and run the service."""
    request = RecommendRoutesRequest(**request_dict)
    return recommend_routes_service(request)
def recommend_routes_task_start(request: RecommendRoutesRequest) -> RecommendRoutesTask:
    """Enqueue a recommendation job and return its initial descriptor."""
    async_result = recommend_routes_task.delay(request.dict())
    # A result is only available if the task happened to finish already.
    result_payload = None
    if async_result.ready():
        result_payload = json.loads(async_result.result)
    return RecommendRoutesTask(
        task_id=async_result.id,
        status=RecommendRoutesTaskStatus(async_result.status.lower()),
        request=request,
        result=result_payload,
    )
def recommend_routes_task_result(task_id: str) -> RecommendRoutesTask:
    """Look up a previously submitted task and wrap its current state."""
    async_result = recommend_routes_task.AsyncResult(task_id)
    status = RecommendRoutesTaskStatus(async_result.status.lower())
    error_message = None
    result_payload = None
    if status == RecommendRoutesTaskStatus.FAILURE:
        # On failure, Celery stores the raised exception as the result.
        error_message = str(async_result.result)
    elif status == RecommendRoutesTaskStatus.SUCCESS:
        result_payload = json.loads(async_result.result)
    return RecommendRoutesTask(
        task_id=async_result.id,
        request=None,
        status=status,
        error_message=error_message,
        result=result_payload,
    )
| {
"content_hash": "5eb0df7606012dda1070691f9eb33a24",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 109,
"avg_line_length": 36.80434782608695,
"alnum_prop": 0.7365623154164206,
"repo_name": "TRI-AMDD/piro",
"id": "ad3a881eea355b5d51532dc8513d4a4da0f114c3",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "web/backend/app/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1765"
},
{
"name": "Dockerfile",
"bytes": "3515"
},
{
"name": "HTML",
"bytes": "1768"
},
{
"name": "JavaScript",
"bytes": "2316"
},
{
"name": "Jupyter Notebook",
"bytes": "8895693"
},
{
"name": "Python",
"bytes": "194084"
},
{
"name": "Shell",
"bytes": "759"
},
{
"name": "TypeScript",
"bytes": "38099"
}
],
"symlink_target": ""
} |
'''
phone_communication_backup_coalescer
Copyright 2016, Phillip Green II
Licensed under MIT.
'''
import datetime
import logging
import rx
from rx.core import AnonymousObservable
import xml.etree.ElementTree as ET
from phone_communication_backup_coalescer.files import dir_to_files_mapper
from phone_communication_backup_coalescer import __version__, __name__
def as_list(item):
    """Wrap *item* in a list unless it is already a non-string iterable.

    Strings are treated as scalars: under Python 3 ``str`` defines
    ``__iter__``, so without an explicit check a single directory path
    passed as ``source_dirs`` would be iterated character by character.
    Under Python 2 the extra check is a no-op (str lacks ``__iter__``).
    """
    if isinstance(item, str) or not hasattr(item, '__iter__'):
        item = [item]
    return item
class Coalescer:
    """Merges many per-backup XML files into one deduplicated XML file.

    The format-specific behavior (filename pattern, parsing, tree
    building, sorting, XSL stylesheet) is delegated to *controller*.
    """

    def __init__(self, controller):
        # Controller supplies: filename_pattern, parse_file, sort,
        # tree_seed, tree_appender, xsl_file_name.
        self._controller = controller

    def coalesce(self, source_dirs, output_file_name):
        """Scan *source_dirs*, merge parsed items, write *output_file_name*.

        Returns a tuple ``(files, count)`` where *files* is the list of
        input files processed and *count* the number of distinct items
        kept. Duplicate files and duplicate items are dropped; per-file
        parse errors are logged and skipped rather than aborting.
        """
        def write_tree(tree):
            # Emit XML declaration, a build-info comment, and the XSL
            # stylesheet reference before the serialized tree itself.
            with open(output_file_name, 'w') as f:
                xml_declaration = ET.ProcessingInstruction('xml',
                                                           "version='1.0' encoding='UTF-8' standalone='yes'")
                build_info = ET.Comment('Created by {} v{} on {}'.format(__name__, __version__, datetime.datetime.now()))
                xsl_declaration = ET.ProcessingInstruction('xml-stylesheet',
                                                           "type='text/xsl' href='{}'".format(self._controller.xsl_file_name))
                f.write(ET.tostring(xml_declaration))
                f.write(ET.tostring(build_info))
                f.write(ET.tostring(xsl_declaration))
                tree.write(f)
        def append_item_to_tree(root, item):
            # Reducer: delegate the append, keep threading the same root.
            self._controller.tree_appender(root, item)
            return root
        # Shared mutable state updated by the stream callbacks:
        # meta[0] = processed files, meta[1] = distinct item count.
        meta = [[], 0]
        def rememberFile(f):
            meta[0].append(f)
        def safely_parse(f):
            # Wrap controller.parse_file in an Observable that converts a
            # parse failure into a single error event instead of killing
            # the whole stream. NOTE(review): on success the parsed items
            # appear to be dicts with a 'type' key ('item'/'error') -
            # confirm against the controller implementations.
            def subscribe(observer):
                try:
                    items = list(self._controller.parse_file(f))
                except Exception as ex:
                    error = {'type': 'error', 'value': ex, 'file': f}
                    observer.on_next(error)
                    observer.on_completed()
                    return
                for item in items:
                    observer.on_next(item)
                observer.on_completed()
            return AnonymousObservable(subscribe)
        def increment_counter(_):
            meta[1] += 1
        def print_errors(e):
            if e['type'] == 'error':
                logging.error('Error: %s', e['value'])
        # TODO print warnings
        # Pipeline: dirs -> unique files -> parsed events -> keep distinct
        # items -> sort -> fold into an XML tree -> write to disk.
        source = rx.Observable.from_iterable(as_list(source_dirs))\
            .flat_map(dir_to_files_mapper(self._controller.filename_pattern))\
            .distinct()\
            .do_action(rememberFile)\
            .do_action(lambda f: logging.info('processing %s', f))\
            .flat_map(safely_parse)\
            .do_action(print_errors)\
            .where(lambda e: e['type'] == 'item')\
            .map(lambda e: e['value'])\
            .distinct()\
            .do_action(increment_counter)\
            .to_list()\
            .flat_map(lambda l: self._controller.sort(l))\
            .reduce(append_item_to_tree, self._controller.tree_seed())\
            .do_action(lambda _: logging.info('writing %s', output_file_name))\
            .subscribe(write_tree)
        return tuple(meta)
| {
"content_hash": "8018a2eb253d9bcc4ca366d01ea50958",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 127,
"avg_line_length": 35.49438202247191,
"alnum_prop": 0.5349794238683128,
"repo_name": "phillipgreenii/phone_communication_backup_coalescer",
"id": "3e98f653dad95083feaef31e4e0efd78b0a58fcc",
"size": "3159",
"binary": false,
"copies": "1",
"ref": "refs/heads/python-upgrade",
"path": "phone_communication_backup_coalescer/coalesce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94810"
},
{
"name": "XSLT",
"bytes": "4626"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for GLES2 command buffers."""
import itertools
import os
import os.path
import sys
import re
from optparse import OptionParser
# Sizes in bytes used when computing command-buffer struct layouts.
_SIZE_OF_UINT32 = 4
_SIZE_OF_COMMAND_HEADER = 4
# Command ids below this value are reserved for common commands.
_FIRST_SPECIFIC_COMMAND_ID = 256
# License header emitted at the top of every generated file.
_LICENSE = """// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
# Warning banner emitted after the license in every generated file.
_DO_NOT_EDIT_WARNING = """// This file is auto-generated from
// gpu/command_buffer/build_gles2_cmd_buffer.py
// DO NOT EDIT!
"""
# This string is copied directly out of the gl2.h file from GLES2.0
#
# Edits:
#
# *) Any argument that is a resourceID has been changed to GLid<Type>.
# (not pointer arguments) and if it's allowed to be zero it's GLidZero<Type>
# If it's allowed to not exist it's GLidBind<Type>
#
# *) All GLenums have been changed to GLenumTypeOfEnum
#
# Maps each GL typedef name to the underlying C type used when
# generating command structs and serialization code.
_GL_TYPES = {
  'GLenum': 'unsigned int',
  'GLboolean': 'unsigned char',
  'GLbitfield': 'unsigned int',
  'GLbyte': 'signed char',
  'GLshort': 'short',
  'GLint': 'int',
  'GLsizei': 'int',
  'GLubyte': 'unsigned char',
  'GLushort': 'unsigned short',
  'GLuint': 'unsigned int',
  'GLfloat': 'float',
  'GLclampf': 'float',
  'GLvoid': 'void',
  'GLfixed': 'int',
  'GLclampx': 'int',
  'GLintptr': 'long int',
  'GLsizeiptr': 'long int',
}
# Capabilities selected with glEnable. Each entry may carry:
#   'default':    initial value when not GL's default of false.
#   'state_flag': decoder dirty-flag to set when the capability changes.
_CAPABILITY_FLAGS = [
  {'name': 'blend'},
  {'name': 'cull_face'},
  {'name': 'depth_test', 'state_flag': 'clear_state_dirty_'},
  {'name': 'dither', 'default': True},
  {'name': 'polygon_offset_fill'},
  {'name': 'sample_alpha_to_coverage'},
  {'name': 'sample_coverage'},
  {'name': 'scissor_test', 'state_flag': 'clear_state_dirty_'},
  {'name': 'stencil_test', 'state_flag': 'clear_state_dirty_'},
]
# GL state tracked by the decoder, grouped by the call that sets it.
# Per entry:
#   'type':       how setter args map to states ('Normal' = one call sets
#                 all listed states; 'SrcDst' and 'FrontBack' pair states
#                 for the corresponding *Separate GL functions).
#   'func':       GL function used to (re)apply the state.
#   'enum':       glGet* enum for the whole group, or per-state 'enum's.
#   'states':     individual variables with their C type and default.
#   'state_flag': optional decoder dirty-flag touched on change.
_STATES = {
  'ClearColor': {
    'type': 'Normal',
    'func': 'ClearColor',
    'enum': 'GL_COLOR_CLEAR_VALUE',
    'states': [
      {'name': 'color_clear_red', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'color_clear_green', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'color_clear_blue', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'color_clear_alpha', 'type': 'GLfloat', 'default': '0.0f'},
    ],
  },
  'ClearDepthf': {
    'type': 'Normal',
    'func': 'ClearDepth',
    'enum': 'GL_DEPTH_CLEAR_VALUE',
    'states': [
      {'name': 'depth_clear', 'type': 'GLclampf', 'default': '1.0f'},
    ],
  },
  'ColorMask': {
    'type': 'Normal',
    'func': 'ColorMask',
    'enum': 'GL_COLOR_WRITEMASK',
    'states': [
      {'name': 'color_mask_red', 'type': 'GLboolean', 'default': 'true'},
      {'name': 'color_mask_green', 'type': 'GLboolean', 'default': 'true'},
      {'name': 'color_mask_blue', 'type': 'GLboolean', 'default': 'true'},
      {'name': 'color_mask_alpha', 'type': 'GLboolean', 'default': 'true'},
    ],
    'state_flag': 'clear_state_dirty_',
  },
  'ClearStencil': {
    'type': 'Normal',
    'func': 'ClearStencil',
    'enum': 'GL_STENCIL_CLEAR_VALUE',
    'states': [
      {'name': 'stencil_clear', 'type': 'GLint', 'default': '0'},
    ],
  },
  'BlendColor': {
    'type': 'Normal',
    'func': 'BlendColor',
    'enum': 'GL_BLEND_COLOR',
    'states': [
      {'name': 'blend_color_red', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'blend_color_green', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'blend_color_blue', 'type': 'GLfloat', 'default': '0.0f'},
      {'name': 'blend_color_alpha', 'type': 'GLfloat', 'default': '0.0f'},
    ],
  },
  'BlendEquation': {
    'type': 'SrcDst',
    'func': 'BlendEquationSeparate',
    'states': [
      {
        'name': 'blend_equation_rgb',
        'type': 'GLenum',
        'enum': 'GL_BLEND_EQUATION_RGB',
        'default': 'GL_FUNC_ADD',
      },
      {
        'name': 'blend_equation_alpha',
        'type': 'GLenum',
        'enum': 'GL_BLEND_EQUATION_ALPHA',
        'default': 'GL_FUNC_ADD',
      },
    ],
  },
  'BlendFunc': {
    'type': 'SrcDst',
    'func': 'BlendFuncSeparate',
    'states': [
      {
        'name': 'blend_source_rgb',
        'type': 'GLenum',
        'enum': 'GL_BLEND_SRC_RGB',
        'default': 'GL_ONE',
      },
      {
        'name': 'blend_dest_rgb',
        'type': 'GLenum',
        'enum': 'GL_BLEND_DST_RGB',
        'default': 'GL_ZERO',
      },
      {
        'name': 'blend_source_alpha',
        'type': 'GLenum',
        'enum': 'GL_BLEND_SRC_ALPHA',
        'default': 'GL_ONE',
      },
      {
        'name': 'blend_dest_alpha',
        'type': 'GLenum',
        'enum': 'GL_BLEND_DST_ALPHA',
        'default': 'GL_ZERO',
      },
    ],
  },
  'PolygonOffset': {
    'type': 'Normal',
    'func': 'PolygonOffset',
    'states': [
      {
        'name': 'polygon_offset_factor',
        'type': 'GLfloat',
        'enum': 'GL_POLYGON_OFFSET_FACTOR',
        'default': '0.0f',
      },
      {
        'name': 'polygon_offset_units',
        'type': 'GLfloat',
        'enum': 'GL_POLYGON_OFFSET_UNITS',
        'default': '0.0f',
      },
    ],
  },
  'CullFace': {
    'type': 'Normal',
    'func': 'CullFace',
    'enum': 'GL_CULL_FACE_MODE',
    'states': [
      {
        'name': 'cull_mode',
        'type': 'GLenum',
        'default': 'GL_BACK',
      },
    ],
  },
  'FrontFace': {
    'type': 'Normal',
    'func': 'FrontFace',
    'enum': 'GL_FRONT_FACE',
    'states': [{'name': 'front_face', 'type': 'GLenum', 'default': 'GL_CCW'}],
  },
  'DepthFunc': {
    'type': 'Normal',
    'func': 'DepthFunc',
    'enum': 'GL_DEPTH_FUNC',
    'states': [{'name': 'depth_func', 'type': 'GLenum', 'default': 'GL_LESS'}],
  },
  'DepthRange': {
    'type': 'Normal',
    'func': 'DepthRange',
    'enum': 'GL_DEPTH_RANGE',
    'states': [
      {'name': 'z_near', 'type': 'GLclampf', 'default': '0.0f'},
      {'name': 'z_far', 'type': 'GLclampf', 'default': '1.0f'},
    ],
  },
  'SampleCoverage': {
    'type': 'Normal',
    'func': 'SampleCoverage',
    'states': [
      {
        'name': 'sample_coverage_value',
        'type': 'GLclampf',
        'enum': 'GL_SAMPLE_COVERAGE_VALUE',
        'default': '1.0f',
      },
      {
        'name': 'sample_coverage_invert',
        'type': 'GLboolean',
        'enum': 'GL_SAMPLE_COVERAGE_INVERT',
        'default': 'false',
      },
    ],
  },
  'StencilMask': {
    'type': 'FrontBack',
    'func': 'StencilMaskSeparate',
    'state_flag': 'clear_state_dirty_',
    'states': [
      {
        'name': 'stencil_front_writemask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_WRITEMASK',
        'default': '0xFFFFFFFFU',
      },
      {
        'name': 'stencil_back_writemask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_BACK_WRITEMASK',
        'default': '0xFFFFFFFFU',
      },
    ],
  },
  'StencilOp': {
    'type': 'FrontBack',
    'func': 'StencilOpSeparate',
    'states': [
      {
        'name': 'stencil_front_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_front_z_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_PASS_DEPTH_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_front_z_pass_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_PASS_DEPTH_PASS',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_back_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_back_z_fail_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_PASS_DEPTH_FAIL',
        'default': 'GL_KEEP',
      },
      {
        'name': 'stencil_back_z_pass_op',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_PASS_DEPTH_PASS',
        'default': 'GL_KEEP',
      },
    ],
  },
  'StencilFunc': {
    'type': 'FrontBack',
    'func': 'StencilFuncSeparate',
    'states': [
      {
        'name': 'stencil_front_func',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_FUNC',
        'default': 'GL_ALWAYS',
      },
      {
        'name': 'stencil_front_ref',
        'type': 'GLint',
        'enum': 'GL_STENCIL_REF',
        'default': '0',
      },
      {
        'name': 'stencil_front_mask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_VALUE_MASK',
        'default': '0xFFFFFFFFU',
      },
      {
        'name': 'stencil_back_func',
        'type': 'GLenum',
        'enum': 'GL_STENCIL_BACK_FUNC',
        'default': 'GL_ALWAYS',
      },
      {
        'name': 'stencil_back_ref',
        'type': 'GLint',
        'enum': 'GL_STENCIL_BACK_REF',
        'default': '0',
      },
      {
        'name': 'stencil_back_mask',
        'type': 'GLuint',
        'enum': 'GL_STENCIL_BACK_VALUE_MASK',
        'default': '0xFFFFFFFFU',
      },
    ],
  },
  # TODO: Consider implementing these states
  # GL_GENERATE_MIPMAP_HINT
  # GL_ACTIVE_TEXTURE,
  # GL_PACK_ALIGNMENT,
  # GL_UNPACK_ALIGNMENT
  'LineWidth': {
    'type': 'Normal',
    'func': 'LineWidth',
    'enum': 'GL_LINE_WIDTH',
    'states': [
      {
        'name': 'line_width',
        'type': 'GLfloat',
        'default': '1.0f',
        # 'range_checks' rejects invalid values before they reach GL;
        # 'test_value' is used by the generated unit tests.
        'range_checks': [{'check': "<= 0.0f", 'test_value': "0.0f"}],
      }],
  },
  'DepthMask': {
    'type': 'Normal',
    'func': 'DepthMask',
    'enum': 'GL_DEPTH_WRITEMASK',
    'states': [
      {'name': 'depth_mask', 'type': 'GLboolean', 'default': 'true'},
    ],
    'state_flag': 'clear_state_dirty_',
  },
  'Scissor': {
    'type': 'Normal',
    'func': 'Scissor',
    'enum': 'GL_SCISSOR_BOX',
    'states': [
      # NOTE: These defaults reset at GLES2DecoderImpl::Initialization.
      {
        'name': 'scissor_x',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportX',
      },
      {
        'name': 'scissor_y',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportY',
      },
      {
        'name': 'scissor_width',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportWidth',
      },
      {
        'name': 'scissor_height',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportHeight',
      },
    ],
  },
  'Viewport': {
    'type': 'Normal',
    'func': 'Viewport',
    'enum': 'GL_VIEWPORT',
    'states': [
      # NOTE: These defaults reset at GLES2DecoderImpl::Initialization.
      {
        'name': 'viewport_x',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportX',
      },
      {
        'name': 'viewport_y',
        'type': 'GLint',
        'default': '0',
        'expected': 'kViewportY',
      },
      {
        'name': 'viewport_width',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportWidth',
      },
      {
        'name': 'viewport_height',
        'type': 'GLsizei',
        'default': '1',
        'expected': 'kViewportHeight',
      },
    ],
  },
}
# This is a list of enum names and their valid values. It is used to map
# GLenum arguments to a specific set of valid values.
_ENUM_LISTS = {
'BlitFilter': {
'type': 'GLenum',
'valid': [
'GL_NEAREST',
'GL_LINEAR',
],
'invalid': [
'GL_LINEAR_MIPMAP_LINEAR',
],
},
'FrameBufferTarget': {
'type': 'GLenum',
'valid': [
'GL_FRAMEBUFFER',
],
'invalid': [
'GL_DRAW_FRAMEBUFFER' ,
'GL_READ_FRAMEBUFFER' ,
],
},
'RenderBufferTarget': {
'type': 'GLenum',
'valid': [
'GL_RENDERBUFFER',
],
'invalid': [
'GL_FRAMEBUFFER',
],
},
'BufferTarget': {
'type': 'GLenum',
'valid': [
'GL_ARRAY_BUFFER',
'GL_ELEMENT_ARRAY_BUFFER',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'BufferUsage': {
'type': 'GLenum',
'valid': [
'GL_STREAM_DRAW',
'GL_STATIC_DRAW',
'GL_DYNAMIC_DRAW',
],
'invalid': [
'GL_STATIC_READ',
],
},
'CompressedTextureFormat': {
'type': 'GLenum',
'valid': [
],
},
'GLState': {
'type': 'GLenum',
'valid': [
# NOTE: State an Capability entries added later.
'GL_ACTIVE_TEXTURE',
'GL_ALIASED_LINE_WIDTH_RANGE',
'GL_ALIASED_POINT_SIZE_RANGE',
'GL_ALPHA_BITS',
'GL_ARRAY_BUFFER_BINDING',
'GL_BLUE_BITS',
'GL_COMPRESSED_TEXTURE_FORMATS',
'GL_CURRENT_PROGRAM',
'GL_DEPTH_BITS',
'GL_DEPTH_RANGE',
'GL_ELEMENT_ARRAY_BUFFER_BINDING',
'GL_FRAMEBUFFER_BINDING',
'GL_GENERATE_MIPMAP_HINT',
'GL_GREEN_BITS',
'GL_IMPLEMENTATION_COLOR_READ_FORMAT',
'GL_IMPLEMENTATION_COLOR_READ_TYPE',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
'GL_MAX_FRAGMENT_UNIFORM_VECTORS',
'GL_MAX_RENDERBUFFER_SIZE',
'GL_MAX_TEXTURE_IMAGE_UNITS',
'GL_MAX_TEXTURE_SIZE',
'GL_MAX_VARYING_VECTORS',
'GL_MAX_VERTEX_ATTRIBS',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
'GL_MAX_VERTEX_UNIFORM_VECTORS',
'GL_MAX_VIEWPORT_DIMS',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
'GL_NUM_SHADER_BINARY_FORMATS',
'GL_PACK_ALIGNMENT',
'GL_RED_BITS',
'GL_RENDERBUFFER_BINDING',
'GL_SAMPLE_BUFFERS',
'GL_SAMPLE_COVERAGE_INVERT',
'GL_SAMPLE_COVERAGE_VALUE',
'GL_SAMPLES',
'GL_SCISSOR_BOX',
'GL_SHADER_BINARY_FORMATS',
'GL_SHADER_COMPILER',
'GL_SUBPIXEL_BITS',
'GL_STENCIL_BITS',
'GL_TEXTURE_BINDING_2D',
'GL_TEXTURE_BINDING_CUBE_MAP',
'GL_UNPACK_ALIGNMENT',
'GL_UNPACK_FLIP_Y_CHROMIUM',
'GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM',
'GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM',
# we can add this because we emulate it if the driver does not support it.
'GL_VERTEX_ARRAY_BINDING_OES',
'GL_VIEWPORT',
],
'invalid': [
'GL_FOG_HINT',
],
},
'GetTexParamTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'TextureTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'TextureBindTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP',
],
'invalid': [
'GL_TEXTURE_1D',
'GL_TEXTURE_3D',
],
},
'ShaderType': {
'type': 'GLenum',
'valid': [
'GL_VERTEX_SHADER',
'GL_FRAGMENT_SHADER',
],
'invalid': [
'GL_GEOMETRY_SHADER',
],
},
'FaceType': {
'type': 'GLenum',
'valid': [
'GL_FRONT',
'GL_BACK',
'GL_FRONT_AND_BACK',
],
},
'FaceMode': {
'type': 'GLenum',
'valid': [
'GL_CW',
'GL_CCW',
],
},
'CmpFunction': {
'type': 'GLenum',
'valid': [
'GL_NEVER',
'GL_LESS',
'GL_EQUAL',
'GL_LEQUAL',
'GL_GREATER',
'GL_NOTEQUAL',
'GL_GEQUAL',
'GL_ALWAYS',
],
},
'Equation': {
'type': 'GLenum',
'valid': [
'GL_FUNC_ADD',
'GL_FUNC_SUBTRACT',
'GL_FUNC_REVERSE_SUBTRACT',
],
'invalid': [
'GL_MIN',
'GL_MAX',
],
},
'SrcBlendFactor': {
'type': 'GLenum',
'valid': [
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
'GL_SRC_ALPHA_SATURATE',
],
},
'DstBlendFactor': {
'type': 'GLenum',
'valid': [
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
],
},
'Capability': {
'type': 'GLenum',
'valid': ["GL_%s" % cap['name'].upper() for cap in _CAPABILITY_FLAGS],
'invalid': [
'GL_CLIP_PLANE0',
'GL_POINT_SPRITE',
],
},
'DrawMode': {
'type': 'GLenum',
'valid': [
'GL_POINTS',
'GL_LINE_STRIP',
'GL_LINE_LOOP',
'GL_LINES',
'GL_TRIANGLE_STRIP',
'GL_TRIANGLE_FAN',
'GL_TRIANGLES',
],
'invalid': [
'GL_QUADS',
'GL_POLYGON',
],
},
'IndexType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT',
],
'invalid': [
'GL_UNSIGNED_INT',
'GL_INT',
],
},
'GetMaxIndexType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT',
'GL_UNSIGNED_INT',
],
'invalid': [
'GL_INT',
],
},
'Attachment': {
'type': 'GLenum',
'valid': [
'GL_COLOR_ATTACHMENT0',
'GL_DEPTH_ATTACHMENT',
'GL_STENCIL_ATTACHMENT',
],
},
'BackbufferAttachment': {
'type': 'GLenum',
'valid': [
'GL_COLOR_EXT',
'GL_DEPTH_EXT',
'GL_STENCIL_EXT',
],
},
'BufferParameter': {
'type': 'GLenum',
'valid': [
'GL_BUFFER_SIZE',
'GL_BUFFER_USAGE',
],
'invalid': [
'GL_PIXEL_PACK_BUFFER',
],
},
'FrameBufferParameter': {
'type': 'GLenum',
'valid': [
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE',
],
},
'ProgramParameter': {
'type': 'GLenum',
'valid': [
'GL_DELETE_STATUS',
'GL_LINK_STATUS',
'GL_VALIDATE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_ATTACHED_SHADERS',
'GL_ACTIVE_ATTRIBUTES',
'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',
'GL_ACTIVE_UNIFORMS',
'GL_ACTIVE_UNIFORM_MAX_LENGTH',
],
},
'QueryObjectParameter': {
'type': 'GLenum',
'valid': [
'GL_QUERY_RESULT_EXT',
'GL_QUERY_RESULT_AVAILABLE_EXT',
],
},
'QueryParameter': {
'type': 'GLenum',
'valid': [
'GL_CURRENT_QUERY_EXT',
],
},
'QueryTarget': {
'type': 'GLenum',
'valid': [
'GL_ANY_SAMPLES_PASSED_EXT',
'GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT',
'GL_COMMANDS_ISSUED_CHROMIUM',
'GL_LATENCY_QUERY_CHROMIUM',
'GL_ASYNC_PIXEL_TRANSFERS_COMPLETED_CHROMIUM',
],
},
'RenderBufferParameter': {
'type': 'GLenum',
'valid': [
'GL_RENDERBUFFER_RED_SIZE',
'GL_RENDERBUFFER_GREEN_SIZE',
'GL_RENDERBUFFER_BLUE_SIZE',
'GL_RENDERBUFFER_ALPHA_SIZE',
'GL_RENDERBUFFER_DEPTH_SIZE',
'GL_RENDERBUFFER_STENCIL_SIZE',
'GL_RENDERBUFFER_WIDTH',
'GL_RENDERBUFFER_HEIGHT',
'GL_RENDERBUFFER_INTERNAL_FORMAT',
],
},
'ShaderParameter': {
'type': 'GLenum',
'valid': [
'GL_SHADER_TYPE',
'GL_DELETE_STATUS',
'GL_COMPILE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_SHADER_SOURCE_LENGTH',
'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
],
},
'ShaderPrecision': {
'type': 'GLenum',
'valid': [
'GL_LOW_FLOAT',
'GL_MEDIUM_FLOAT',
'GL_HIGH_FLOAT',
'GL_LOW_INT',
'GL_MEDIUM_INT',
'GL_HIGH_INT',
],
},
'StringType': {
'type': 'GLenum',
'valid': [
'GL_VENDOR',
'GL_RENDERER',
'GL_VERSION',
'GL_SHADING_LANGUAGE_VERSION',
'GL_EXTENSIONS',
],
},
'TextureParameter': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_MAG_FILTER',
'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_POOL_CHROMIUM',
'GL_TEXTURE_WRAP_S',
'GL_TEXTURE_WRAP_T',
],
'invalid': [
'GL_GENERATE_MIPMAP',
],
},
'TexturePool': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_POOL_MANAGED_CHROMIUM',
'GL_TEXTURE_POOL_UNMANAGED_CHROMIUM',
],
},
'TextureWrapMode': {
'type': 'GLenum',
'valid': [
'GL_CLAMP_TO_EDGE',
'GL_MIRRORED_REPEAT',
'GL_REPEAT',
],
},
'TextureMinFilterMode': {
'type': 'GLenum',
'valid': [
'GL_NEAREST',
'GL_LINEAR',
'GL_NEAREST_MIPMAP_NEAREST',
'GL_LINEAR_MIPMAP_NEAREST',
'GL_NEAREST_MIPMAP_LINEAR',
'GL_LINEAR_MIPMAP_LINEAR',
],
},
'TextureMagFilterMode': {
'type': 'GLenum',
'valid': [
'GL_NEAREST',
'GL_LINEAR',
],
},
'TextureUsage': {
'type': 'GLenum',
'valid': [
'GL_NONE',
'GL_FRAMEBUFFER_ATTACHMENT_ANGLE',
],
},
'VertexAttribute': {
'type': 'GLenum',
'valid': [
# some enum that the decoder actually passes through to GL needs
# to be the first listed here since it's used in unit tests.
'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
'GL_VERTEX_ATTRIB_ARRAY_SIZE',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
'GL_VERTEX_ATTRIB_ARRAY_TYPE',
'GL_CURRENT_VERTEX_ATTRIB',
],
},
'VertexPointer': {
'type': 'GLenum',
'valid': [
'GL_VERTEX_ATTRIB_ARRAY_POINTER',
],
},
'HintTarget': {
'type': 'GLenum',
'valid': [
'GL_GENERATE_MIPMAP_HINT',
],
'invalid': [
'GL_PERSPECTIVE_CORRECTION_HINT',
],
},
'HintMode': {
'type': 'GLenum',
'valid': [
'GL_FASTEST',
'GL_NICEST',
'GL_DONT_CARE',
],
},
'PixelStore': {
'type': 'GLenum',
'valid': [
'GL_PACK_ALIGNMENT',
'GL_UNPACK_ALIGNMENT',
'GL_UNPACK_FLIP_Y_CHROMIUM',
'GL_UNPACK_PREMULTIPLY_ALPHA_CHROMIUM',
'GL_UNPACK_UNPREMULTIPLY_ALPHA_CHROMIUM',
],
'invalid': [
'GL_PACK_SWAP_BYTES',
'GL_UNPACK_SWAP_BYTES',
],
},
'PixelStoreAlignment': {
'type': 'GLint',
'valid': [
'1',
'2',
'4',
'8',
],
'invalid': [
'3',
'9',
],
},
'ReadPixelFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_RGB',
'GL_RGBA',
],
},
'PixelType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
],
'invalid': [
'GL_SHORT',
'GL_INT',
],
},
'ReadPixelType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
],
'invalid': [
'GL_SHORT',
'GL_INT',
],
},
'RenderBufferFormat': {
'type': 'GLenum',
'valid': [
'GL_RGBA4',
'GL_RGB565',
'GL_RGB5_A1',
'GL_DEPTH_COMPONENT16',
'GL_STENCIL_INDEX8',
],
},
'ShaderBinaryFormat': {
'type': 'GLenum',
'valid': [
],
},
'StencilOp': {
'type': 'GLenum',
'valid': [
'GL_KEEP',
'GL_ZERO',
'GL_REPLACE',
'GL_INCR',
'GL_INCR_WRAP',
'GL_DECR',
'GL_DECR_WRAP',
'GL_INVERT',
],
},
'TextureFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'invalid': [
'GL_BGRA',
'GL_BGR',
],
},
'TextureInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'invalid': [
'GL_BGRA',
'GL_BGR',
],
},
'TextureInternalFormatStorage': {
'type': 'GLenum',
'valid': [
'GL_RGB565',
'GL_RGBA4',
'GL_RGB5_A1',
'GL_ALPHA8_EXT',
'GL_LUMINANCE8_EXT',
'GL_LUMINANCE8_ALPHA8_EXT',
'GL_RGB8_OES',
'GL_RGBA8_OES',
],
},
'VertexAttribType': {
'type': 'GLenum',
'valid': [
'GL_BYTE',
'GL_UNSIGNED_BYTE',
'GL_SHORT',
'GL_UNSIGNED_SHORT',
# 'GL_FIXED', // This is not available on Desktop GL.
'GL_FLOAT',
],
'invalid': [
'GL_DOUBLE',
],
},
'TextureBorder': {
'type': 'GLint',
'valid': [
'0',
],
'invalid': [
'1',
],
},
'VertexAttribSize': {
'type': 'GLint',
'valid': [
'1',
'2',
'3',
'4',
],
'invalid': [
'0',
'5',
],
},
'ZeroOnly': {
'type': 'GLint',
'valid': [
'0',
],
'invalid': [
'1',
],
},
'FalseOnly': {
'type': 'GLboolean',
'valid': [
'false',
],
'invalid': [
'true',
],
},
'ResetStatus': {
'type': 'GLenum',
'valid': [
'GL_GUILTY_CONTEXT_RESET_ARB',
'GL_INNOCENT_CONTEXT_RESET_ARB',
'GL_UNKNOWN_CONTEXT_RESET_ARB',
],
},
}
# This table specifies the different pepper interfaces that are supported for
# GL commands. 'dev' is true if it's a dev interface.
# Each entry's 'name' matches the 'pepper_interface' values referenced by the
# _FUNCTION_INFO table below; the empty name denotes the core interface.
_PEPPER_INTERFACES = [
  {'name': '', 'dev': False},
  {'name': 'InstancedArrays', 'dev': False},
  {'name': 'FramebufferBlit', 'dev': False},
  {'name': 'FramebufferMultisample', 'dev': False},
  {'name': 'ChromiumEnableFeature', 'dev': False},
  {'name': 'ChromiumMapSub', 'dev': False},
  {'name': 'Query', 'dev': False},
]
# This table specifies types and other special data for the commands that
# will be generated.
#
# Must match function names specified in "cmd_buffer_functions.txt".
#
# cmd_comment: A comment added to the cmd format.
# type: defines which handler will be used to generate code.
# decoder_func: defines which function to call in the decoder to execute the
# corresponding GL command. If not specified the GL command will
# be called directly.
# gl_test_func: GL function that is expected to be called when testing.
# cmd_args: The arguments to use for the command. This overrides generating
# them based on the GL function arguments.
# a NonImmediate type is a type that stays a pointer even in
# and immediate version of acommand.
# gen_cmd: Whether or not this function geneates a command. Default = True.
# immediate: Whether or not to generate an immediate command for the GL
# function. The default is if there is exactly 1 pointer argument
# in the GL function an immediate command is generated.
# bucket: True to generate a bucket version of the command.
# impl_func: Whether or not to generate the GLES2Implementation part of this
# command.
# impl_decl: Whether or not to generate the GLES2Implementation declaration
# for this command.
# needs_size: If true a data_size field is added to the command.
# data_type: The type of data the command uses. For PUTn or PUT types.
# count: The number of units per element. For PUTn or PUT types.
# unit_test: If False no service side unit test will be generated.
# client_test: If False no client side unit test will be generated.
# expectation: If False the unit test will have no expected calls.
# gen_func: Name of function that generates GL resource for corresponding
# bind function.
# states: array of states that get set by this function corresponding to
# the given arguments
# state_flag: name of flag that is set to true when function is called.
# no_gl: no GL function is called.
# valid_args: A dictionary of argument indices to args to use in unit tests
# when they can not be automatically determined.
# pepper_interface: The pepper interface that is used for this extension
# invalid_test: False if no invalid test needed.
_FUNCTION_INFO = {
'ActiveTexture': {
'decoder_func': 'DoActiveTexture',
'unit_test': False,
'impl_func': False,
'client_test': False,
},
'AttachShader': {'decoder_func': 'DoAttachShader'},
'BindAttribLocation': {'type': 'GLchar', 'bucket': True, 'needs_size': True},
'BindBuffer': {
'type': 'Bind',
'decoder_func': 'DoBindBuffer',
'gen_func': 'GenBuffersARB',
},
'BindFramebuffer': {
'type': 'Bind',
'decoder_func': 'DoBindFramebuffer',
'gl_test_func': 'glBindFramebufferEXT',
'gen_func': 'GenFramebuffersEXT',
},
'BindRenderbuffer': {
'type': 'Bind',
'decoder_func': 'DoBindRenderbuffer',
'gl_test_func': 'glBindRenderbufferEXT',
'gen_func': 'GenRenderbuffersEXT',
},
'BindTexture': {
'type': 'Bind',
'decoder_func': 'DoBindTexture',
'gen_func': 'GenTextures',
# TODO(gman): remove this once client side caching works.
'client_test': False,
},
'BlitFramebufferEXT': {
'decoder_func': 'DoBlitFramebufferEXT',
'unit_test': False,
'extension': True,
'pepper_interface': 'FramebufferBlit',
'defer_reads': True,
'defer_draws': True,
},
'BufferData': {
'type': 'Manual',
'immediate': True,
'client_test': False,
},
'BufferSubData': {
'type': 'Data',
'client_test': False,
'decoder_func': 'DoBufferSubData',
},
'CheckFramebufferStatus': {
'type': 'Is',
'decoder_func': 'DoCheckFramebufferStatus',
'gl_test_func': 'glCheckFramebufferStatusEXT',
'error_value': 'GL_FRAMEBUFFER_UNSUPPORTED',
'result': ['GLenum'],
},
'Clear': {
'decoder_func': 'DoClear',
'defer_draws': True,
},
'ClearColor': {
'type': 'StateSet',
'state': 'ClearColor',
},
'ClearDepthf': {
'type': 'StateSet',
'state': 'ClearDepthf',
'decoder_func': 'glClearDepth',
'gl_test_func': 'glClearDepth',
'valid_args': {
'0': '0.5f'
},
},
'ColorMask': {
'type': 'StateSet',
'state': 'ColorMask',
'no_gl': True,
'expectation': False,
},
'ConsumeTextureCHROMIUM': {
'decoder_func': 'DoConsumeTextureCHROMIUM',
'type': 'PUT',
'data_type': 'GLbyte',
'count': 64,
'unit_test': False,
'extension': True,
'chromium': True,
},
'ClearStencil': {
'type': 'StateSet',
'state': 'ClearStencil',
},
'EnableFeatureCHROMIUM': {
'type': 'Custom',
'immediate': False,
'decoder_func': 'DoEnableFeatureCHROMIUM',
'expectation': False,
'cmd_args': 'GLuint bucket_id, GLint* result',
'result': ['GLint'],
'extension': True,
'chromium': True,
'pepper_interface': 'ChromiumEnableFeature',
},
'CompileShader': {'decoder_func': 'DoCompileShader', 'unit_test': False},
'CompressedTexImage2D': {
'type': 'Manual',
'immediate': True,
'bucket': True,
},
'CompressedTexSubImage2D': {
'type': 'Data',
'bucket': True,
'decoder_func': 'DoCompressedTexSubImage2D',
},
'CopyTexImage2D': {
'decoder_func': 'DoCopyTexImage2D',
'unit_test': False,
'defer_reads': True,
},
'CopyTexSubImage2D': {
'decoder_func': 'DoCopyTexSubImage2D',
'defer_reads': True,
},
'CreateProgram': {
'type': 'Create',
'client_test': False,
},
'CreateShader': {
'type': 'Create',
'client_test': False,
},
'BlendColor': {
'type': 'StateSet',
'state': 'BlendColor',
},
'BlendEquation': {
'type': 'StateSetRGBAlpha',
'state': 'BlendEquation',
'valid_args': {
'0': 'GL_FUNC_SUBTRACT'
},
},
'BlendEquationSeparate': {
'type': 'StateSet',
'state': 'BlendEquation',
'valid_args': {
'0': 'GL_FUNC_SUBTRACT'
},
},
'BlendFunc': {
'type': 'StateSetRGBAlpha',
'state': 'BlendFunc',
},
'BlendFuncSeparate': {
'type': 'StateSet',
'state': 'BlendFunc',
},
'SampleCoverage': {'decoder_func': 'DoSampleCoverage'},
'StencilFunc': {
'type': 'StateSetFrontBack',
'state': 'StencilFunc',
},
'StencilFuncSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilFunc',
},
'StencilOp': {
'type': 'StateSetFrontBack',
'state': 'StencilOp',
'valid_args': {
'1': 'GL_INCR'
},
},
'StencilOpSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilOp',
'valid_args': {
'1': 'GL_INCR'
},
},
'Hint': {'decoder_func': 'DoHint'},
'CullFace': {'type': 'StateSet', 'state': 'CullFace'},
'FrontFace': {'type': 'StateSet', 'state': 'FrontFace'},
'DepthFunc': {'type': 'StateSet', 'state': 'DepthFunc'},
'LineWidth': {
'type': 'StateSet',
'state': 'LineWidth',
'valid_args': {
'0': '0.5f'
},
},
'PolygonOffset': {
'type': 'StateSet',
'state': 'PolygonOffset',
},
'DeleteBuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteBuffersARB',
'resource_type': 'Buffer',
'resource_types': 'Buffers',
},
'DeleteFramebuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteFramebuffersEXT',
'resource_type': 'Framebuffer',
'resource_types': 'Framebuffers',
},
'DeleteProgram': {'type': 'Delete', 'decoder_func': 'DoDeleteProgram'},
'DeleteRenderbuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteRenderbuffersEXT',
'resource_type': 'Renderbuffer',
'resource_types': 'Renderbuffers',
},
'DeleteShader': {'type': 'Delete', 'decoder_func': 'DoDeleteShader'},
'DeleteSharedIdsCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoDeleteSharedIdsCHROMIUM',
'impl_func': False,
'expectation': False,
'immediate': False,
'extension': True,
'chromium': True,
},
'DeleteTextures': {
'type': 'DELn',
'resource_type': 'Texture',
'resource_types': 'Textures',
},
'DepthRangef': {
'decoder_func': 'DoDepthRangef',
'gl_test_func': 'glDepthRange',
},
'DepthMask': {
'type': 'StateSet',
'state': 'DepthMask',
'no_gl': True,
'expectation': False,
},
'DetachShader': {'decoder_func': 'DoDetachShader'},
'Disable': {
'decoder_func': 'DoDisable',
'impl_func': False,
'client_test': False,
},
'DisableVertexAttribArray': {
'decoder_func': 'DoDisableVertexAttribArray',
'impl_decl': False,
},
'DrawArrays': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count',
'defer_draws': True,
},
'DrawElements': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset',
'client_test': False,
'defer_draws': True,
},
'Enable': {
'decoder_func': 'DoEnable',
'impl_func': False,
'client_test': False,
},
'EnableVertexAttribArray': {
'decoder_func': 'DoEnableVertexAttribArray',
'impl_decl': False,
},
'Finish': {
'impl_func': False,
'client_test': False,
'decoder_func': 'DoFinish',
},
'Flush': {
'impl_func': False,
'decoder_func': 'DoFlush',
},
'ShallowFlushCHROMIUM': {
'impl_func': False,
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'FramebufferRenderbuffer': {
'decoder_func': 'DoFramebufferRenderbuffer',
'gl_test_func': 'glFramebufferRenderbufferEXT',
},
'FramebufferTexture2D': {
'decoder_func': 'DoFramebufferTexture2D',
'gl_test_func': 'glFramebufferTexture2DEXT',
},
'GenerateMipmap': {
'decoder_func': 'DoGenerateMipmap',
'gl_test_func': 'glGenerateMipmapEXT',
},
'GenBuffers': {
'type': 'GENn',
'gl_test_func': 'glGenBuffersARB',
'resource_type': 'Buffer',
'resource_types': 'Buffers',
},
'GenMailboxCHROMIUM': {
'type': 'Manual',
'cmd_args': 'GLuint bucket_id',
'result': ['SizedResult<GLint>'],
'client_test': False,
'unit_test': False,
'extension': True,
'chromium': True,
},
'GenFramebuffers': {
'type': 'GENn',
'gl_test_func': 'glGenFramebuffersEXT',
'resource_type': 'Framebuffer',
'resource_types': 'Framebuffers',
},
'GenRenderbuffers': {
'type': 'GENn', 'gl_test_func': 'glGenRenderbuffersEXT',
'resource_type': 'Renderbuffer',
'resource_types': 'Renderbuffers',
},
'GenTextures': {
'type': 'GENn',
'gl_test_func': 'glGenTextures',
'resource_type': 'Texture',
'resource_types': 'Textures',
},
'GenSharedIdsCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoGenSharedIdsCHROMIUM',
'impl_func': False,
'expectation': False,
'immediate': False,
'extension': True,
'chromium': True,
},
'GetActiveAttrib': {
'type': 'Custom',
'immediate': False,
'cmd_args':
'GLidProgram program, GLuint index, uint32 name_bucket_id, '
'void* result',
'result': [
'int32 success',
'int32 size',
'uint32 type',
],
},
'GetActiveUniform': {
'type': 'Custom',
'immediate': False,
'cmd_args':
'GLidProgram program, GLuint index, uint32 name_bucket_id, '
'void* result',
'result': [
'int32 success',
'int32 size',
'uint32 type',
],
},
'GetAttachedShaders': {
'type': 'Custom',
'immediate': False,
'cmd_args': 'GLidProgram program, void* result, uint32 result_size',
'result': ['SizedResult<GLuint>'],
},
'GetAttribLocation': {
'type': 'HandWritten',
'immediate': True,
'bucket': True,
'needs_size': True,
'cmd_args':
'GLidProgram program, const char* name, NonImmediate GLint* location',
'result': ['GLint'],
},
'GetBooleanv': {
'type': 'GETn',
'result': ['SizedResult<GLboolean>'],
'decoder_func': 'DoGetBooleanv',
'gl_test_func': 'glGetBooleanv',
},
'GetBufferParameteriv': {'type': 'GETn', 'result': ['SizedResult<GLint>']},
'GetError': {
'type': 'Is',
'decoder_func': 'GetGLError',
'impl_func': False,
'result': ['GLenum'],
'client_test': False,
},
'GetFloatv': {
'type': 'GETn',
'result': ['SizedResult<GLfloat>'],
'decoder_func': 'DoGetFloatv',
'gl_test_func': 'glGetFloatv',
},
'GetFramebufferAttachmentParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetFramebufferAttachmentParameteriv',
'gl_test_func': 'glGetFramebufferAttachmentParameterivEXT',
'result': ['SizedResult<GLint>'],
},
'GetIntegerv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'decoder_func': 'DoGetIntegerv',
'client_test': False,
},
'GetMaxValueInBufferCHROMIUM': {
'type': 'Is',
'decoder_func': 'DoGetMaxValueInBufferCHROMIUM',
'result': ['GLuint'],
'unit_test': False,
'client_test': False,
'extension': True,
'chromium': True,
'impl_func': False,
},
'GetMultipleIntegervCHROMIUM': {
'type': 'Custom',
'immediate': False,
'expectation': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'GetProgramiv': {
'type': 'GETn',
'decoder_func': 'DoGetProgramiv',
'result': ['SizedResult<GLint>'],
'expectation': False,
},
'GetProgramInfoCHROMIUM': {
'type': 'Custom',
'immediate': False,
'expectation': False,
'impl_func': False,
'extension': True,
'chromium': True,
'client_test': False,
'cmd_args': 'GLidProgram program, uint32 bucket_id',
'result': [
'uint32 link_status',
'uint32 num_attribs',
'uint32 num_uniforms',
],
},
'GetProgramInfoLog': {
'type': 'STRn',
'expectation': False,
},
'GetRenderbufferParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetRenderbufferParameteriv',
'gl_test_func': 'glGetRenderbufferParameterivEXT',
'result': ['SizedResult<GLint>'],
},
'GetShaderiv': {
'type': 'GETn',
'decoder_func': 'DoGetShaderiv',
'result': ['SizedResult<GLint>'],
},
'GetShaderInfoLog': {
'type': 'STRn',
'get_len_func': 'glGetShaderiv',
'get_len_enum': 'GL_INFO_LOG_LENGTH',
'unit_test': False,
},
'GetShaderPrecisionFormat': {
'type': 'Custom',
'immediate': False,
'cmd_args':
'GLenumShaderType shadertype, GLenumShaderPrecision precisiontype, '
'void* result',
'result': [
'int32 success',
'int32 min_range',
'int32 max_range',
'int32 precision',
],
},
'GetShaderSource': {
'type': 'STRn',
'get_len_func': 'DoGetShaderiv',
'get_len_enum': 'GL_SHADER_SOURCE_LENGTH',
'unit_test': False,
'client_test': False,
},
'GetString': {
'type': 'Custom',
'client_test': False,
'cmd_args': 'GLenumStringType name, uint32 bucket_id',
},
'GetTexParameterfv': {'type': 'GETn', 'result': ['SizedResult<GLfloat>']},
'GetTexParameteriv': {'type': 'GETn', 'result': ['SizedResult<GLint>']},
'GetTranslatedShaderSourceANGLE': {
'type': 'STRn',
'get_len_func': 'DoGetShaderiv',
'get_len_enum': 'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
'unit_test': False,
'extension': True,
},
'GetUniformfv': {
'type': 'Custom',
'immediate': False,
'result': ['SizedResult<GLfloat>'],
},
'GetUniformiv': {
'type': 'Custom',
'immediate': False,
'result': ['SizedResult<GLint>'],
},
'GetUniformLocation': {
'type': 'HandWritten',
'immediate': True,
'bucket': True,
'needs_size': True,
'cmd_args':
'GLidProgram program, const char* name, NonImmediate GLint* location',
'result': ['GLint'],
},
'GetVertexAttribfv': {
'type': 'GETn',
'result': ['SizedResult<GLfloat>'],
'impl_decl': False,
'decoder_func': 'DoGetVertexAttribfv',
'expectation': False,
'client_test': False,
},
'GetVertexAttribiv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'impl_decl': False,
'decoder_func': 'DoGetVertexAttribiv',
'expectation': False,
'client_test': False,
},
'GetVertexAttribPointerv': {
'type': 'Custom',
'immediate': False,
'result': ['SizedResult<GLuint>'],
'client_test': False,
},
'IsBuffer': {
'type': 'Is',
'decoder_func': 'DoIsBuffer',
'expectation': False,
},
'IsEnabled': {
'type': 'Is',
'decoder_func': 'DoIsEnabled',
'impl_func': False,
'expectation': False,
},
'IsFramebuffer': {
'type': 'Is',
'decoder_func': 'DoIsFramebuffer',
'expectation': False,
},
'IsProgram': {
'type': 'Is',
'decoder_func': 'DoIsProgram',
'expectation': False,
},
'IsRenderbuffer': {
'type': 'Is',
'decoder_func': 'DoIsRenderbuffer',
'expectation': False,
},
'IsShader': {
'type': 'Is',
'decoder_func': 'DoIsShader',
'expectation': False,
},
'IsTexture': {
'type': 'Is',
'decoder_func': 'DoIsTexture',
'expectation': False,
},
'LinkProgram': {
'decoder_func': 'DoLinkProgram',
'impl_func': False,
},
'MapBufferCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'chromium': True,
},
'MapBufferSubDataCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'MapTexSubImage2DCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'PixelStorei': {'type': 'Manual'},
'PostSubBufferCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'unit_test': False,
'client_test': False,
'extension': True,
'chromium': True,
},
'ProduceTextureCHROMIUM': {
'decoder_func': 'DoProduceTextureCHROMIUM',
'type': 'PUT',
'data_type': 'GLbyte',
'count': 64,
'unit_test': False,
'extension': True,
'chromium': True,
},
'RenderbufferStorage': {
'decoder_func': 'DoRenderbufferStorage',
'gl_test_func': 'glRenderbufferStorageEXT',
'expectation': False,
},
'RenderbufferStorageMultisampleEXT': {
'decoder_func': 'DoRenderbufferStorageMultisample',
'gl_test_func': 'glRenderbufferStorageMultisampleEXT',
'expectation': False,
'unit_test': False,
'extension': True,
'pepper_interface': 'FramebufferMultisample',
},
'ReadPixels': {
'cmd_comment':
'// ReadPixels has the result separated from the pixel buffer so that\n'
'// it is easier to specify the result going to some specific place\n'
'// that exactly fits the rectangle of pixels.\n',
'type': 'Custom',
'immediate': False,
'impl_func': False,
'client_test': False,
'cmd_args':
'GLint x, GLint y, GLsizei width, GLsizei height, '
'GLenumReadPixelFormat format, GLenumReadPixelType type, '
'uint32 pixels_shm_id, uint32 pixels_shm_offset, '
'uint32 result_shm_id, uint32 result_shm_offset',
'result': ['uint32'],
'defer_reads': True,
},
'RegisterSharedIdsCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoRegisterSharedIdsCHROMIUM',
'impl_func': False,
'expectation': False,
'immediate': False,
'extension': True,
'chromium': True,
},
'ReleaseShaderCompiler': {
'decoder_func': 'DoReleaseShaderCompiler',
'unit_test': False,
},
'ShaderBinary': {
'type': 'Custom',
'client_test': False,
},
'ShaderSource': {
'type': 'Manual',
'immediate': True,
'bucket': True,
'needs_size': True,
'client_test': False,
'cmd_args':
'GLuint shader, const char* data',
},
'StencilMask': {
'type': 'StateSetFrontBack',
'state': 'StencilMask',
'no_gl': True,
'expectation': False,
},
'StencilMaskSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilMask',
'no_gl': True,
'expectation': False,
},
'SwapBuffers': {
'type': 'Custom',
'impl_func': False,
'unit_test': False,
'client_test': False,
'extension': True,
},
'TexImage2D': {
'type': 'Manual',
'immediate': True,
'client_test': False,
},
'TexParameterf': {
'decoder_func': 'DoTexParameterf',
'valid_args': {
'2': 'GL_NEAREST'
},
},
'TexParameteri': {
'decoder_func': 'DoTexParameteri',
'valid_args': {
'2': 'GL_NEAREST'
},
},
'TexParameterfv': {
'type': 'PUT',
'data_type': 'GLfloat',
'data_value': 'GL_NEAREST',
'count': 1,
'decoder_func': 'DoTexParameterfv',
},
'TexParameteriv': {
'type': 'PUT',
'data_type': 'GLint',
'data_value': 'GL_NEAREST',
'count': 1,
'decoder_func': 'DoTexParameteriv',
},
'TexSubImage2D': {
'type': 'Manual',
'immediate': True,
'client_test': False,
'cmd_args': 'GLenumTextureTarget target, GLint level, '
'GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, '
'GLenumTextureFormat format, GLenumPixelType type, '
'const void* pixels, GLboolean internal'
},
'Uniform1f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 1},
'Uniform1fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 1,
'decoder_func': 'DoUniform1fv',
},
'Uniform1i': {'decoder_func': 'DoUniform1i', 'unit_test': False},
'Uniform1iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 1,
'decoder_func': 'DoUniform1iv',
'unit_test': False,
},
'Uniform2i': {'type': 'PUTXn', 'data_type': 'GLint', 'count': 2},
'Uniform2f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 2},
'Uniform2fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 2,
'decoder_func': 'DoUniform2fv',
},
'Uniform2iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 2,
'decoder_func': 'DoUniform2iv',
},
'Uniform3i': {'type': 'PUTXn', 'data_type': 'GLint', 'count': 3},
'Uniform3f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 3},
'Uniform3fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 3,
'decoder_func': 'DoUniform3fv',
},
'Uniform3iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 3,
'decoder_func': 'DoUniform3iv',
},
'Uniform4i': {'type': 'PUTXn', 'data_type': 'GLint', 'count': 4},
'Uniform4f': {'type': 'PUTXn', 'data_type': 'GLfloat', 'count': 4},
'Uniform4fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 4,
'decoder_func': 'DoUniform4fv',
},
'Uniform4iv': {
'type': 'PUTn',
'data_type': 'GLint',
'count': 4,
'decoder_func': 'DoUniform4iv',
},
'UniformMatrix2fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 4,
'decoder_func': 'DoUniformMatrix2fv',
},
'UniformMatrix3fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 9,
'decoder_func': 'DoUniformMatrix3fv',
},
'UniformMatrix4fv': {
'type': 'PUTn',
'data_type': 'GLfloat',
'count': 16,
'decoder_func': 'DoUniformMatrix4fv',
},
'UnmapBufferCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'chromium': True,
},
'UnmapBufferSubDataCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'UnmapTexSubImage2DCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
'pepper_interface': 'ChromiumMapSub',
},
'UseProgram': {
'decoder_func': 'DoUseProgram',
'impl_func': False,
'unit_test': False,
},
'ValidateProgram': {'decoder_func': 'DoValidateProgram'},
'VertexAttrib1f': {'decoder_func': 'DoVertexAttrib1f'},
'VertexAttrib1fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 1,
'decoder_func': 'DoVertexAttrib1fv',
},
'VertexAttrib2f': {'decoder_func': 'DoVertexAttrib2f'},
'VertexAttrib2fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 2,
'decoder_func': 'DoVertexAttrib2fv',
},
'VertexAttrib3f': {'decoder_func': 'DoVertexAttrib3f'},
'VertexAttrib3fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 3,
'decoder_func': 'DoVertexAttrib3fv',
},
'VertexAttrib4f': {'decoder_func': 'DoVertexAttrib4f'},
'VertexAttrib4fv': {
'type': 'PUT',
'data_type': 'GLfloat',
'count': 4,
'decoder_func': 'DoVertexAttrib4fv',
},
'VertexAttribPointer': {
'type': 'Manual',
'cmd_args': 'GLuint indx, GLintVertexAttribSize size, '
'GLenumVertexAttribType type, GLboolean normalized, '
'GLsizei stride, GLuint offset',
'client_test': False,
},
'Scissor': {
'type': 'StateSet',
'state': 'Scissor',
},
'Viewport': {
'decoder_func': 'DoViewport',
},
'ResizeCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'unit_test': False,
'extension': True,
'chromium': True,
},
'GetRequestableExtensionsCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'immediate': False,
'cmd_args': 'uint32 bucket_id',
'extension': True,
'chromium': True,
},
'RequestExtensionCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'immediate': False,
'client_test': False,
'cmd_args': 'uint32 bucket_id',
'extension': True,
'chromium': True,
},
'RateLimitOffscreenContextCHROMIUM': {
'gen_cmd': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'CreateStreamTextureCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint client_id, void* result',
'result': ['GLuint'],
'immediate': False,
'impl_func': False,
'expectation': False,
'extension': True,
'chromium': True,
'client_test': False,
},
'DestroyStreamTextureCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'expectation': False,
'extension': True,
'chromium': True,
},
'TexImageIOSurface2DCHROMIUM': {
'decoder_func': 'DoTexImageIOSurface2DCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'CopyTextureCHROMIUM': {
'decoder_func': 'DoCopyTextureCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'TexStorage2DEXT': {
'unit_test': False,
'extension': True,
'decoder_func': 'DoTexStorage2DEXT',
},
'DrawArraysInstancedANGLE': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count, '
'GLsizei primcount',
'extension': True,
'unit_test': False,
'pepper_interface': 'InstancedArrays',
'defer_draws': True,
},
'DrawElementsInstancedANGLE': {
'type': 'Manual',
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset, GLsizei primcount',
'extension': True,
'unit_test': False,
'client_test': False,
'pepper_interface': 'InstancedArrays',
'defer_draws': True,
},
'VertexAttribDivisorANGLE': {
'type': 'Manual',
'cmd_args': 'GLuint index, GLuint divisor',
'extension': True,
'unit_test': False,
'pepper_interface': 'InstancedArrays',
},
'GenQueriesEXT': {
'type': 'GENn',
'gl_test_func': 'glGenQueriesARB',
'resource_type': 'Query',
'resource_types': 'Queries',
'unit_test': False,
'pepper_interface': 'Query',
},
'DeleteQueriesEXT': {
'type': 'DELn',
'gl_test_func': 'glDeleteQueriesARB',
'resource_type': 'Query',
'resource_types': 'Queries',
'unit_test': False,
'pepper_interface': 'Query',
},
'IsQueryEXT': {
'gen_cmd': False,
'client_test': False,
'pepper_interface': 'Query',
},
'BeginQueryEXT': {
'type': 'Manual',
'cmd_args': 'GLenumQueryTarget target, GLidQuery id, void* sync_data',
'immediate': False,
'gl_test_func': 'glBeginQuery',
'pepper_interface': 'Query',
},
'EndQueryEXT': {
'type': 'Manual',
'cmd_args': 'GLenumQueryTarget target, GLuint submit_count',
'gl_test_func': 'glEndnQuery',
'client_test': False,
'pepper_interface': 'Query',
},
'GetQueryivEXT': {
'gen_cmd': False,
'client_test': False,
'gl_test_func': 'glGetQueryiv',
'pepper_interface': 'Query',
},
'GetQueryObjectuivEXT': {
'gen_cmd': False,
'client_test': False,
'gl_test_func': 'glGetQueryObjectuiv',
'pepper_interface': 'Query',
},
'BindUniformLocationCHROMIUM': {
'type': 'GLchar',
'bucket': True,
'needs_size': True,
'gl_test_func': 'DoBindUniformLocationCHROMIUM',
},
'InsertEventMarkerEXT': {
'type': 'GLcharN',
'decoder_func': 'DoInsertEventMarkerEXT',
'expectation': False,
},
'PushGroupMarkerEXT': {
'type': 'GLcharN',
'decoder_func': 'DoPushGroupMarkerEXT',
'expectation': False,
},
'PopGroupMarkerEXT': {
'decoder_func': 'DoPopGroupMarkerEXT',
'expectation': False,
'impl_func': False,
},
'GenVertexArraysOES': {
'type': 'GENn',
'gl_test_func': 'glGenVertexArraysOES',
'resource_type': 'VertexArray',
'resource_types': 'VertexArrays',
'unit_test': False,
},
'BindVertexArrayOES': {
'type': 'Bind',
'gl_test_func': 'glBindVertexArrayOES',
'decoder_func': 'DoBindVertexArrayOES',
'gen_func': 'GenVertexArraysOES',
'unit_test': False,
'client_test': False,
},
'DeleteVertexArraysOES': {
'type': 'DELn',
'gl_test_func': 'glDeleteVertexArraysOES',
'resource_type': 'VertexArray',
'resource_types': 'VertexArrays',
'unit_test': False,
},
'IsVertexArrayOES': {
'type': 'Is',
'gl_test_func': 'glIsVertexArrayOES',
'decoder_func': 'DoIsVertexArrayOES',
'expectation': False,
'unit_test': False,
},
'BindTexImage2DCHROMIUM': {
'decoder_func': 'DoBindTexImage2DCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'ReleaseTexImage2DCHROMIUM': {
'decoder_func': 'DoReleaseTexImage2DCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'TraceBeginCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'immediate': False,
'client_test': False,
'cmd_args': 'GLuint bucket_id',
'extension': True,
'chromium': True,
},
'TraceEndCHROMIUM': {
'decoder_func': 'DoTraceEndCHROMIUM',
'unit_test': False,
'extension': True,
'chromium': True,
},
'AsyncTexImage2DCHROMIUM': {
'type': 'Manual',
'immediate': False,
'client_test': False,
'extension': True,
'chromium': True,
},
'AsyncTexSubImage2DCHROMIUM': {
'type': 'Manual',
'immediate': False,
'client_test': False,
'extension': True,
'chromium': True,
},
'DiscardFramebufferEXT': {
'type': 'PUTn',
'count': 1,
'data_type': 'GLenum',
'cmd_args': 'GLenum target, GLsizei count, '
'const GLenum* attachments',
'decoder_func': 'DoDiscardFramebufferEXT',
'unit_test': False,
'client_test': False,
'extension': True,
},
'LoseContextCHROMIUM': {
'type': 'Manual',
'impl_func': True,
'extension': True,
'chromium': True,
},
}
def Grouper(n, iterable, fillvalue=None):
  """Collect data into fixed-length chunks or blocks.

  Example: Grouper(3, 'ABCDEFG', 'x') yields ('A', 'B', 'C'),
  ('D', 'E', 'F'), ('G', 'x', 'x').

  Args:
    n: the chunk length.
    iterable: the sequence to chunk.
    fillvalue: value used to pad the final chunk up to length n.

  Returns:
    An iterator of n-tuples.
  """
  # itertools.izip_longest (Python 2) was renamed to zip_longest in Python 3;
  # resolve whichever is available so the script runs under both.
  zip_longest = getattr(itertools, 'izip_longest', None) or itertools.zip_longest
  args = [iter(iterable)] * n
  return zip_longest(fillvalue=fillvalue, *args)
def SplitWords(input_string):
  """Transforms a input_string into a list of lower-case components.

  Args:
    input_string: the input string.

  Returns:
    a list of lower-case words.
  """
  # Underscore-delimited identifiers: 'some_TEXT_' -> ['some', 'text'].
  if '_' in input_string:
    return input_string.replace('_', ' ').strip().lower().split()
  has_upper = re.search('[A-Z]', input_string)
  has_lower = re.search('[a-z]', input_string)
  if has_upper and has_lower:
    # Mixed case: cut at capitalization boundaries.
    # 'SomeText' -> 'Some Text'
    spaced = re.sub('([A-Z])', r' \1', input_string).strip()
    # 'Vector3' -> 'Vector 3'
    spaced = re.sub('([^0-9])([0-9])', r'\1 \2', spaced)
    return spaced.lower().split()
  # Single-case input: just lower-case and split on whitespace.
  return input_string.lower().split()
def Lower(words):
  """Makes a lower-case identifier from words.

  Args:
    words: a list of lower-case words.

  Returns:
    the lower-case identifier.
  """
  separator = '_'
  return separator.join(words)
def ToUnderscore(input_string):
  """converts CamelCase to camel_case."""
  return Lower(SplitWords(input_string))
class CWriter(object):
  """Writes to a file formatting it for Google's style guidelines.

  Output is buffered in memory; Close() only touches the file on disk
  when the generated content actually changed, which avoids spurious
  timestamp bumps for downstream build steps.
  """
  def __init__(self, filename):
    self.filename = filename  # destination path, written on Close()
    self.file_num = 0  # index used when output is split across numbered files
    self.content = []  # buffered output fragments, joined on Close()
  def SetFileNum(self, num):
    """Used to help write number files and tests."""
    self.file_num = num
  def Write(self, string):
    """Writes a string to a file splitting if it's > 80 characters."""
    lines = string.splitlines()
    num_lines = len(lines)
    for ii in range(0, num_lines):
      # Re-emit a newline for every line except a final line that the
      # input did not terminate with '\n'.
      self.__WriteLine(lines[ii], ii < (num_lines - 1) or string[-1] == '\n')
  def __FindSplit(self, string):
    """Finds a place to split a string."""
    # Prefer splitting just after '=' (but not '==') when it is before
    # column 80.
    splitter = string.find('=')
    if splitter >= 1 and not string[splitter + 1] == '=' and splitter < 80:
      return splitter
    # parts = string.split('(')
    # Next preference: split after a '(' that is not adjacent to a double
    # quote (so parentheses inside string literals are left alone).
    parts = re.split("(?<=[^\"])\((?!\")", string)
    fptr = re.compile('\*\w*\)')
    if len(parts) > 1:
      splitter = len(parts[0])
      for ii in range(1, len(parts)):
        # Don't split on the dot in "if (.condition)".
        if (not parts[ii - 1][-3:] == "if " and
            # Don't split "(.)" or "(.*fptr)".
            (len(parts[ii]) > 0 and
             not parts[ii][0] == ")" and not fptr.match(parts[ii]))
            and splitter < 80):
          return splitter
        splitter += len(parts[ii]) + 1
    # Last resort: the right-most ',' before column 80, skipping commas
    # that directly follow a closing double quote.  The loop always exits
    # via one of the returns below ('done' is never set).
    done = False
    end = len(string)
    last_splitter = -1
    while not done:
      splitter = string[0:end].rfind(',')
      if splitter < 0 or (splitter > 0 and string[splitter - 1] == '"'):
        return last_splitter
      elif splitter >= 80:
        end = splitter
      else:
        return splitter
  def __WriteLine(self, line, ends_with_eol):
    """Given a single line, writes it to a file, splitting if it's > 80 chars"""
    if len(line) >= 80:
      i = self.__FindSplit(line)
      if i > 0:
        line1 = line[0:i + 1]
        if line1[-1] == ' ':
          line1 = line1[:-1]
        lineend = ''
        if line1[0] == '#':
          # Continued preprocessor directives need a trailing backslash.
          lineend = ' \\'
        nolint = ''
        if len(line1) > 80:
          nolint = '  // NOLINT'
        self.__AddLine(line1 + nolint + lineend + '\n')
        # Indent the continuation to match the first line; add extra
        # indent unless we split at a comma (argument lists line up).
        match = re.match("( +)", line1)
        indent = ""
        if match:
          indent = match.group(1)
        splitter = line[i]
        if not splitter == ',':
          indent = "    " + indent
        # Recurse: the continuation may itself still be over 80 columns.
        self.__WriteLine(indent + line[i + 1:].lstrip(), True)
        return
    nolint = ''
    if len(line) > 80:
      # No split point found; emit over-long line with a lint suppression.
      nolint = '  // NOLINT'
    self.__AddLine(line + nolint)
    if ends_with_eol:
      self.__AddLine('\n')
  def __AddLine(self, line):
    # All output funnels through here into the in-memory buffer.
    self.content.append(line)
  def Close(self):
    """Close the file."""
    content = "".join(self.content)
    write_file = True
    if os.path.exists(self.filename):
      # Compare against the existing file and skip the write when nothing
      # changed, so incremental builds don't see a modified timestamp.
      old_file = open(self.filename, "rb");
      old_content = old_file.read()
      old_file.close();
      if content == old_content:
        write_file = False
    if write_file:
      # NOTE(review): 'file' shadows the builtin; harmless in this scope.
      file = open(self.filename, "wb")
      file.write(content)
      file.close()
class CHeaderWriter(CWriter):
  """Writes a C Header file.

  Adds the license banner, the do-not-edit warning, an optional file
  comment, and the #ifndef include guard on construction; Close() emits
  the matching #endif before flushing via CWriter.Close().
  """
  _non_alnum_re = re.compile(r'[^a-zA-Z0-9]')
  def __init__(self, filename, file_comment = None):
    CWriter.__init__(self, filename)
    # Walk up from the output path to the 'src' directory so the include
    # guard is derived from the path relative to the source root.
    base = os.path.abspath(filename)
    while os.path.basename(base) != 'src':
      new_base = os.path.dirname(base)
      assert new_base != base  # Prevent infinite loop.
      base = new_base
    hpath = os.path.relpath(filename, base)
    # e.g. gpu/command_buffer/foo.h -> GPU_COMMAND_BUFFER_FOO_H_
    self.guard = self._non_alnum_re.sub('_', hpath).upper() + '_'
    self.Write(_LICENSE)
    self.Write(_DO_NOT_EDIT_WARNING)
    if not file_comment == None:
      self.Write(file_comment)
    self.Write("#ifndef %s\n" % self.guard)
    self.Write("#define %s\n\n" % self.guard)
  def Close(self):
    """Writes the closing include-guard #endif and flushes the file."""
    self.Write("#endif  // %s\n\n" % self.guard)
    CWriter.Close(self)
class TypeHandler(object):
  """This class emits code for a particular type of function.

  Base class for the per-type handlers below.  A handler knows how to
  emit every artifact for its functions: the command struct, service and
  client implementations, unit tests, and cmd helpers.  Subclasses
  override individual Write* methods to customize output.
  """
  # Matches one two-space-indented EXPECT_CALL statement in a test
  # template; used to strip expectations when 'expectation' is False.
  _remove_expected_call_re = re.compile(r'  EXPECT_CALL.*?;\n', re.S)
  def __init__(self):
    pass
  def InitFunction(self, func):
    """Add or adjust anything type specific for this function."""
    if func.GetInfo('needs_size') and not func.name.endswith('Bucket'):
      func.AddCmdArg(DataSizeArgument('data_size'))
  def AddImmediateFunction(self, generator, func):
    """Adds an immediate version of a function."""
    # Generate an immediate command if there is only 1 pointer arg.
    immediate = func.GetInfo('immediate')  # can be True, False or None
    if immediate == True or immediate == None:
      if func.num_pointer_args == 1 or immediate:
        generator.AddFunction(ImmediateFunction(func))
  def AddBucketFunction(self, generator, func):
    """Adds a bucket version of a function."""
    # Generate an immediate command if there is only 1 pointer arg.
    bucket = func.GetInfo('bucket')  # can be True, False or None
    if bucket:
      generator.AddFunction(BucketFunction(func))
  def WriteStruct(self, func, file):
    """Writes a structure that matches the arguments to a function."""
    comment = func.GetInfo('cmd_comment')
    if not comment == None:
      file.Write(comment)
    file.Write("struct %s {\n" % func.name)
    file.Write("  typedef %s ValueType;\n" % func.name)
    file.Write("  static const CommandId kCmdId = k%s;\n" % func.name)
    func.WriteCmdArgFlag(file)
    file.Write("\n")
    # A 'result' info entry of one line becomes a typedef; several lines
    # become a nested Result struct.
    result = func.GetInfo('result')
    if not result == None:
      if len(result) == 1:
        file.Write("  typedef %s Result;\n\n" % result[0])
      else:
        file.Write("  struct Result {\n")
        for line in result:
          file.Write("    %s;\n" % line)
        file.Write("  };\n\n")
    func.WriteCmdComputeSize(file)
    func.WriteCmdSetHeader(file)
    func.WriteCmdInit(file)
    func.WriteCmdSet(file)
    file.Write("  gpu::CommandHeader header;\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("  %s %s;\n" % (arg.cmd_type, arg.name))
    file.Write("};\n")
    file.Write("\n")
    # Emit COMPILE_ASSERTs pinning the struct size and every field's
    # offset; each cmd arg occupies exactly one uint32 slot.
    size = len(args) * _SIZE_OF_UINT32 + _SIZE_OF_COMMAND_HEADER
    file.Write("COMPILE_ASSERT(sizeof(%s) == %d,\n" % (func.name, size))
    file.Write("               Sizeof_%s_is_not_%d);\n" % (func.name, size))
    file.Write("COMPILE_ASSERT(offsetof(%s, header) == 0,\n" % func.name)
    file.Write("               OffsetOf_%s_header_not_0);\n" % func.name)
    offset = _SIZE_OF_COMMAND_HEADER
    for arg in args:
      file.Write("COMPILE_ASSERT(offsetof(%s, %s) == %d,\n" %
                 (func.name, arg.name, offset))
      file.Write("               OffsetOf_%s_%s_not_%d);\n" %
                 (func.name, arg.name, offset))
      offset += _SIZE_OF_UINT32
    if not result == None and len(result) > 1:
      # Also pin the offsets of multi-field Result structs; assumes each
      # Result field is one uint32 wide as well.
      offset = 0;
      for line in result:
        parts = line.split()
        name = parts[-1]
        check = """
COMPILE_ASSERT(offsetof(%(cmd_name)s::Result, %(field_name)s) == %(offset)d,
               OffsetOf_%(cmd_name)s_Result_%(field_name)s_not_%(offset)d);
"""
        file.Write((check.strip() + "\n") % {
              'cmd_name': func.name,
              'field_name': name,
              'offset': offset,
            })
        offset += _SIZE_OF_UINT32
      file.Write("\n")
  def WriteHandlerImplementation(self, func, file):
    """Writes the handler implementation for this command."""
    file.Write("  %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
  def WriteCmdSizeTest(self, func, file):
    """Writes the size test for a command."""
    file.Write("  EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);\n")
  def WriteFormatTest(self, func, file):
    """Writes a format test for a command."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd")
    args = func.GetCmdArgs()
    # Arguments get distinct sentinel values starting at 11 so the test
    # can verify each one landed in the right field.
    for value, arg in enumerate(args):
      file.Write(",\n      static_cast<%s>(%d)" % (arg.type, value + 11))
    file.Write(");\n")
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    func.type_handler.WriteCmdSizeTest(func, file)
    for value, arg in enumerate(args):
      file.Write("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
                 (arg.type, value + 11, arg.name))
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd));\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteImmediateFormatTest(self, func, file):
    """Writes a format test for an immediate version of a command."""
    pass
  def WriteBucketFormatTest(self, func, file):
    """Writes a format test for a bucket version of a command."""
    pass
  def WriteGetDataSizeCode(self, func, file):
    """Writes the code to set data_size used in validation"""
    pass
  def WriteImmediateCmdSizeTest(self, func, file):
    """Writes a size test for an immediate version of a command."""
    file.Write("  // TODO(gman): Compute correct size.\n")
    file.Write("  EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);\n")
  def WriteImmediateHandlerImplementation (self, func, file):
    """Writes the handler impl for the immediate version of a command."""
    file.Write("  %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
  def WriteBucketHandlerImplementation (self, func, file):
    """Writes the handler impl for the bucket version of a command."""
    file.Write("  %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
  def WriteServiceImplementation(self, func, file):
    """Writes the service implementation for a command."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    self.WriteHandlerDeferReadWrite(func, file);
    if len(func.GetOriginalArgs()) > 0:
      # The last arg is read after data_size is computed because pointer
      # args need data_size to validate their extent.
      last_arg = func.GetLastOriginalArg()
      all_but_last_arg = func.GetOriginalArgs()[:-1]
      for arg in all_but_last_arg:
        arg.WriteGetCode(file)
      self.WriteGetDataSizeCode(func, file)
      last_arg.WriteGetCode(file)
    func.WriteHandlerValidation(file)
    func.WriteHandlerImplementation(file)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteImmediateServiceImplementation(self, func, file):
    """Writes the service implementation for an immediate version of command."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    self.WriteHandlerDeferReadWrite(func, file);
    last_arg = func.GetLastOriginalArg()
    all_but_last_arg = func.GetOriginalArgs()[:-1]
    for arg in all_but_last_arg:
      arg.WriteGetCode(file)
    self.WriteGetDataSizeCode(func, file)
    last_arg.WriteGetCode(file)
    func.WriteHandlerValidation(file)
    func.WriteHandlerImplementation(file)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteBucketServiceImplementation(self, func, file):
    """Writes the service implementation for a bucket version of command."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    self.WriteHandlerDeferReadWrite(func, file);
    last_arg = func.GetLastOriginalArg()
    all_but_last_arg = func.GetOriginalArgs()[:-1]
    for arg in all_but_last_arg:
      arg.WriteGetCode(file)
    self.WriteGetDataSizeCode(func, file)
    last_arg.WriteGetCode(file)
    func.WriteHandlerValidation(file)
    func.WriteHandlerImplementation(file)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteHandlerDeferReadWrite(self, func, file):
    """Writes the code to handle deferring reads or writes."""
    defer_reads = func.GetInfo('defer_reads')
    defer_draws = func.GetInfo('defer_draws')
    conditions = []
    if defer_draws:
      conditions.append('ShouldDeferDraws()');
    if defer_reads:
      conditions.append('ShouldDeferReads()');
    if not conditions:
      return
    file.Write("  if (%s)\n" % ' || '.join(conditions))
    file.Write("    return error::kDeferCommandUntilLater;\n")
  def WriteValidUnitTest(self, func, file, test, extra = {}):
    """Writes a valid unit test.

    NOTE: the mutable default for 'extra' is safe here because it is
    only read (vars.update(extra)), never mutated.
    """
    if func.GetInfo('expectation') == False:
      test = self._remove_expected_call_re.sub('', test)
    name = func.name
    arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      arg_strings.append(arg.GetValidArg(func, count, 0))
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
    gl_func_name = func.GetGLTestFunctionName()
    vars = {
      'test_name': 'GLES2DecoderTest%d' % file.file_num,
      'name':name,
      'gl_func_name': gl_func_name,
      'args': ", ".join(arg_strings),
      'gl_args': ", ".join(gl_arg_strings),
    }
    vars.update(extra)
    file.Write(test % vars)
  def WriteInvalidUnitTest(self, func, file, test, extra = {}):
    """Writes an invalid unit test.

    Emits one test per (argument, invalid value) pair: that argument gets
    the invalid value while all other arguments stay valid.
    """
    for arg_index, arg in enumerate(func.GetOriginalArgs()):
      num_invalid_values = arg.GetNumInvalidValues(func)
      for value_index in range(0, num_invalid_values):
        arg_strings = []
        parse_result = "kNoError"
        gl_error = None
        # NOTE: this inner loop reuses the name 'arg'; the outer 'arg' is
        # not needed again within the iteration, so this is benign.
        for count, arg in enumerate(func.GetOriginalArgs()):
          if count == arg_index:
            (arg_string, parse_result, gl_error) = arg.GetInvalidArg(
                count, value_index)
          else:
            arg_string = arg.GetValidArg(func, count, 0)
          arg_strings.append(arg_string)
        gl_arg_strings = []
        for arg in func.GetOriginalArgs():
          gl_arg_strings.append("_")
        gl_func_name = func.GetGLTestFunctionName()
        gl_error_test = ''
        if not gl_error == None:
          gl_error_test = '\n  EXPECT_EQ(%s, GetGLError());' % gl_error
        vars = {
          'test_name': 'GLES2DecoderTest%d' % file.file_num ,
          'name': func.name,
          'arg_index': arg_index,
          'value_index': value_index,
          'gl_func_name': gl_func_name,
          'args': ", ".join(arg_strings),
          'all_but_last_args': ", ".join(arg_strings[:-1]),
          'gl_args': ", ".join(gl_arg_strings),
          'parse_result': parse_result,
          'gl_error_test': gl_error_test,
        }
        vars.update(extra)
        file.Write(test % vars)
  def WriteServiceUnitTest(self, func, file):
    """Writes the service unit test for a command."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    self.WriteValidUnitTest(func, file, valid_test)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test)
  def WriteImmediateServiceUnitTest(self, func, file):
    """Writes the service unit test for an immediate command."""
    file.Write("// TODO(gman): %s\n" % func.name)
  def WriteImmediateValidationCode(self, func, file):
    """Writes the validation code for an immediate version of a command."""
    pass
  def WriteBucketServiceUnitTest(self, func, file):
    """Writes the service unit test for a bucket command."""
    file.Write("// TODO(gman): %s\n" % func.name)
  def WriteBucketValidationCode(self, func, file):
    """Writes the validation code for a bucket version of a command."""
    file.Write("// TODO(gman): %s\n" % func.name)
  def WriteGLES2ImplementationDeclaration(self, func, file):
    """Writes the GLES2 Implementation declaration."""
    impl_decl = func.GetInfo('impl_decl')
    if impl_decl == None or impl_decl == True:
      file.Write("virtual %s %s(%s) OVERRIDE;\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("\n")
  def WriteGLES2CLibImplementation(self, func, file):
    """Writes the C-linkage wrapper that forwards to the current context."""
    file.Write("%s GLES2%s(%s) {\n" %
               (func.return_type, func.name,
                func.MakeTypedOriginalArgString("")))
    result_string = "return "
    if func.return_type == "void":
      result_string = ""
    file.Write("  %sgles2::GetGLContext()->%s(%s);\n" %
               (result_string, func.original_name,
                func.MakeOriginalArgString("")))
    file.Write("}\n")
  def WriteGLES2Header(self, func, file):
    """Writes a re-write macro for GLES"""
    file.Write("#define gl%s GLES2_GET_FUN(%s)\n" %(func.name, func.name))
  def WriteClientGLCallLog(self, func, file):
    """Writes a logging macro for the client side code."""
    comma = ""
    if len(func.GetOriginalArgs()):
      comma = " << "
    file.Write(
        '  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] gl%s(" %s%s << ")");\n' %
        (func.original_name, comma, func.MakeLogArgString()))
  def WriteClientGLReturnLog(self, func, file):
    """Writes the return value logging code."""
    if func.return_type != "void":
      file.Write('  GPU_CLIENT_LOG("return:" << result)\n')
  def WriteGLES2ImplementationHeader(self, func, file):
    """Writes the GLES2 Implementation."""
    self.WriteGLES2ImplementationDeclaration(func, file)
  def WriteGLES2Implementation(self, func, file):
    """Writes the GLES2 Implementation."""
    impl_func = func.GetInfo('impl_func')
    impl_decl = func.GetInfo('impl_decl')
    gen_cmd = func.GetInfo('gen_cmd')
    # Only emit a body for functions that can be fully auto-generated and
    # whose info flags don't opt out of any of the pieces.
    if (func.can_auto_generate and
        (impl_func == None or impl_func == True) and
        (impl_decl == None or impl_decl == True) and
        (gen_cmd == None or gen_cmd == True)):
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      self.WriteClientGLCallLog(func, file)
      func.WriteDestinationInitalizationValidation(file)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      file.Write("  helper_->%s(%s);\n" %
                 (func.name, func.MakeOriginalArgString("")))
      file.Write("  CheckGLError();\n")
      self.WriteClientGLReturnLog(func, file)
      file.Write("}\n")
      file.Write("\n")
  def WriteGLES2InterfaceHeader(self, func, file):
    """Writes the GLES2 Interface."""
    file.Write("virtual %s %s(%s) = 0;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
  def WriteGLES2InterfaceStub(self, func, file):
    """Writes the GLES2 Interface stub declaration."""
    file.Write("virtual %s %s(%s) OVERRIDE;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
  def WriteGLES2InterfaceStubImpl(self, func, file):
    """Writes the GLES2 Interface stub implementation."""
    args = func.GetOriginalArgs()
    arg_string = ", ".join(
        ["%s /* %s */" % (arg.type, arg.name) for arg in args])
    file.Write("%s GLES2InterfaceStub::%s(%s) {\n" %
               (func.return_type, func.original_name, arg_string))
    if func.return_type != "void":
      file.Write("  return 0;\n")
    file.Write("}\n")
  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 Implementation unit test."""
    client_test = func.GetInfo('client_test')
    if (func.can_auto_generate and
        (client_test == None or client_test == True)):
      code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };
  Cmds expected;
  expected.cmd.Init(%(cmd_args)s);
  gl_->%(name)s(%(args)s);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
      cmd_arg_strings = []
      for count, arg in enumerate(func.GetCmdArgs()):
        cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
      count += 1
      gl_arg_strings = []
      for count, arg in enumerate(func.GetOriginalArgs()):
        gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
      file.Write(code % {
            'name': func.name,
            'args': ", ".join(gl_arg_strings),
            'cmd_args': ", ".join(cmd_arg_strings),
          })
    else:
      if client_test != False:
        file.Write("// TODO: Implement unit test for %s\n" % func.name)
  def WriteDestinationInitalizationValidation(self, func, file):
    """Writes the client side destination initialization validation."""
    for arg in func.GetOriginalArgs():
      arg.WriteDestinationInitalizationValidation(file, func)
  def WriteTraceEvent(self, func, file):
    """Writes the trace-event macro for the client-side implementation."""
    file.Write('  TRACE_EVENT0("gpu", "GLES2Implementation::%s");\n' %
               func.original_name)
  def WriteImmediateCmdComputeSize(self, func, file):
    """Writes the size computation code for the immediate version of a cmd."""
    file.Write("  static uint32 ComputeSize(uint32 size_in_bytes) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) +  // NOLINT\n")
    file.Write("        RoundSizeToMultipleOfEntries(size_in_bytes));\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdSetHeader(self, func, file):
    """Writes the SetHeader function for the immediate version of a cmd."""
    file.Write("  void SetHeader(uint32 size_in_bytes) {\n")
    file.Write("    header.SetCmdByTotalSize<ValueType>(size_in_bytes);\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdInit(self, func, file):
    """Writes the Init function for the immediate version of a command."""
    # Subclasses with immediate commands must provide this.
    raise NotImplementedError(func.name)
  def WriteImmediateCmdSet(self, func, file):
    """Writes the Set function for the immediate version of a command."""
    # Subclasses with immediate commands must provide this.
    raise NotImplementedError(func.name)
  def WriteCmdHelper(self, func, file):
    """Writes the cmd helper definition for a cmd."""
    code = """  void %(name)s(%(typed_args)s) {
    gles2::%(name)s* c = GetCmdSpace<gles2::%(name)s>();
    if (c) {
      c->Init(%(args)s);
    }
  }
"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedCmdArgString(""),
          "args": func.MakeCmdArgString(""),
        })
  def WriteImmediateCmdHelper(self, func, file):
    """Writes the cmd helper definition for the immediate version of a cmd."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 s = 0;  // TODO(gman): compute correct size
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(s);
    if (c) {
      c->Init(%(args)s);
    }
  }
"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedCmdArgString(""),
          "args": func.MakeCmdArgString(""),
        })
class StateSetHandler(TypeHandler):
  """Handler for commands that simply set state.

  Emits a handler that range-checks the new values (when the state table
  declares range_checks), caches them in state_, and only calls GL when
  at least one value actually changed.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    # First emit the range checks declared in the state table.
    code = []
    for ndx,item in enumerate(states):
      if 'range_checks' in item:
        for range_check in item['range_checks']:
          code.append("%s %s" % (args[ndx].name, range_check['check']))
    if len(code):
      file.Write("  if (%s) {\n" % " ||\n      ".join(code))
      # NOTE(review): the error message uses args[ndx] from the loop
      # above, i.e. the LAST checked state's arg name, even when an
      # earlier arg failed the check — confirm this is intended.
      file.Write(
        '    SetGLError(GL_INVALID_VALUE, "%s", "%s out of range");\n' %
        (func.name, args[ndx].name))
      file.Write("    return error::kNoError;\n")
      file.Write("  }\n")
    # Then update the cached state and call GL only on a real change.
    code = []
    for ndx,item in enumerate(states):
      code.append("state_.%s != %s" % (item['name'], args[ndx].name))
    file.Write("  if (%s) {\n" % " ||\n      ".join(code))
    for ndx,item in enumerate(states):
      file.Write("    state_.%s = %s;\n" % (item['name'], args[ndx].name))
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    TypeHandler.WriteServiceUnitTest(self, func, file)
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    # One extra test per declared range check, feeding its test_value.
    for ndx,item in enumerate(states):
      if 'range_checks' in item:
        for check_ndx, range_check in enumerate(item['range_checks']):
          valid_test = """
TEST_F(%(test_name)s, %(name)sInvalidValue%(ndx)d_%(check_ndx)d) {
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
"""
          name = func.name
          arg_strings = []
          for count, arg in enumerate(func.GetOriginalArgs()):
            arg_strings.append(arg.GetValidArg(func, count, 0))
          arg_strings[ndx] = range_check['test_value']
          vars = {
            'test_name': 'GLES2DecoderTest%d' % file.file_num,
            'name': name,
            'ndx': ndx,
            'check_ndx': check_ndx,
            'args': ", ".join(arg_strings),
          }
          file.Write(valid_test % vars)
class StateSetRGBAlphaHandler(TypeHandler):
  """Handler for commands that simply set state that have rgb/alpha.

  The state table lists twice as many states as the function has args;
  indexing with 'ndx % num_args' applies each arg to both its rgb and
  its alpha state entry.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    num_args = len(args)
    code = []
    for ndx,item in enumerate(states):
      code.append("state_.%s != %s" % (item['name'], args[ndx % num_args].name))
    file.Write("  if (%s) {\n" % " ||\n      ".join(code))
    for ndx, item in enumerate(states):
      file.Write("    state_.%s = %s;\n" %
                 (item['name'], args[ndx % num_args].name))
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
class StateSetFrontBackSeparateHandler(TypeHandler):
  """Handler for commands that simply set state that have front/back.

  The first arg selects the face (GL_FRONT, GL_BACK or
  GL_FRONT_AND_BACK); the remaining args update the state group(s)
  matching the selected face(s).
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    face = args[0].name
    num_args = len(args)
    # Group the state list into front and back groups of num_args - 1
    # entries each (group 0 -> GL_FRONT, group 1 -> GL_BACK).
    file.Write("  bool changed = false;\n")
    for group_ndx, group in enumerate(Grouper(num_args - 1, states)):
      file.Write("  if (%s == %s || %s == GL_FRONT_AND_BACK) {\n" %
                 (face, ('GL_FRONT', 'GL_BACK')[group_ndx], face))
      code = []
      for ndx, item in enumerate(group):
        code.append("state_.%s != %s" % (item['name'], args[ndx + 1].name))
      file.Write("    changed |= %s;\n" % " ||\n        ".join(code))
      file.Write("  }\n")
    file.Write("  if (changed) {\n")
    for group_ndx, group in enumerate(Grouper(num_args - 1, states)):
      file.Write("    if (%s == %s || %s == GL_FRONT_AND_BACK) {\n" %
                 (face, ('GL_FRONT', 'GL_BACK')[group_ndx], face))
      for ndx, item in enumerate(group):
        file.Write("      state_.%s = %s;\n" %
                   (item['name'], args[ndx + 1].name))
      file.Write("    }\n")
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
class StateSetFrontBackHandler(TypeHandler):
  """Handler for commands that simply set state that set both front/back.

  Unlike StateSetFrontBackSeparateHandler there is no face selector;
  every arg is applied to both the front and the back state group.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    state_name = func.GetInfo('state')
    state = _STATES[state_name]
    states = state['states']
    args = func.GetOriginalArgs()
    num_args = len(args)
    code = []
    for group_ndx, group in enumerate(Grouper(num_args, states)):
      for ndx, item in enumerate(group):
        code.append("state_.%s != %s" % (item['name'], args[ndx].name))
    file.Write("  if (%s) {\n" % " ||\n      ".join(code))
    for group_ndx, group in enumerate(Grouper(num_args, states)):
      for ndx, item in enumerate(group):
        file.Write("    state_.%s = %s;\n" % (item['name'], args[ndx].name))
    if 'state_flag' in state:
      file.Write("    %s = true;\n" % state['state_flag'])
    if not func.GetInfo("no_gl"):
      file.Write("    %s(%s);\n" %
                 (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  }\n")
class CustomHandler(TypeHandler):
  """Handler for commands that are auto-generated but require minor tweaks.

  Suppresses the generated service implementations/tests (those are
  written by hand) while still generating the immediate-cmd Init/Set
  boilerplate.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteImmediateServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteImmediateCmdGetTotalSize(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("    uint32 total_size = 0;  // TODO(gman): get correct size.\n")
  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void Init(%s) {\n" % func.MakeTypedCmdArgString("_"))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    SetHeader(total_size);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s) {\n" %
               func.MakeTypedCmdArgString("_", True))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, total_size);\n")
    file.Write("  }\n")
    file.Write("\n")
class TodoHandler(CustomHandler):
  """Handler for commands that are not yet implemented.

  Emits client and service stubs that set GL_INVALID_OPERATION with a
  "not implemented" message, and suppresses tests and immediate
  variants.
  """
  def AddImmediateFunction(self, generator, func):
    """Overridden from TypeHandler."""
    pass
  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  // TODO: for now this is a no-op\n")
    file.Write(
        "  SetGLError(GL_INVALID_OPERATION, \"gl%s\", \"not implemented\");\n" %
        func.name)
    if func.return_type != "void":
      file.Write("  return 0;\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    file.Write("  // TODO: for now this is a no-op\n")
    file.Write(
        "  SetGLError(GL_INVALID_OPERATION, \"gl%s\", \"not implemented\");\n" %
        func.name)
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")
class HandWrittenHandler(CustomHandler):
  """Handler for commands where everything must be written by hand.

  Suppresses all generated output for the function and leaves TODO
  markers where tests would normally be generated.
  """
  def InitFunction(self, func):
    """Add or adjust anything type specific for this function."""
    CustomHandler.InitFunction(self, func)
    # Nothing about this function can be auto-generated.
    func.can_auto_generate = False
  def WriteStruct(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteDocs(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteBucketServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteImmediateServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteBucketCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Write test for %s\n" % func.name)
  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Write test for %s\n" % func.name)
  def WriteBucketFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Write test for %s\n" % func.name)
class ManualHandler(CustomHandler):
  """Handler for commands whose handlers must be written by hand.

  Like CustomHandler but also suppresses the generated service
  implementations; client-side output is only generated when the
  function info opts in via 'impl_func'.
  """
  def __init__(self):
    CustomHandler.__init__(self)
  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    if (func.name == 'CompressedTexImage2DBucket'):
      # Replace the trailing pointer/size arg pair with a bucket id.
      func.cmd_args = func.cmd_args[:-1]
      func.AddCmdArg(Argument('bucket_id', 'GLuint'))
    else:
      CustomHandler.InitFunction(self, func)
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): %s\n\n" % func.name)
  def WriteImmediateServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    pass
  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("// TODO(gman): Implement test for %s\n" % func.name)
  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    if func.GetInfo('impl_func'):
      super(ManualHandler, self).WriteGLES2Implementation(func, file)
  def WriteGLES2ImplementationHeader(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("virtual %s %s(%s) OVERRIDE;\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("\n")
  def WriteImmediateCmdGetTotalSize(self, func, file):
    """Overridden from TypeHandler."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    if func.name == 'ShaderSourceImmediate':
      file.Write("    uint32 total_size = ComputeSize(_data_size);\n")
    else:
      CustomHandler.WriteImmediateCmdGetTotalSize(self, func, file)
class DataHandler(TypeHandler):
  """Handler for glBufferData, glBufferSubData, glTexImage2D, glTexSubImage2D,
  glCompressedTexImage2D, glCompressedTexImageSub2D.

  These commands carry a variable-size data payload whose size must be
  computed per command.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    if func.name == 'CompressedTexSubImage2DBucket':
      # The bucket variant sends its data through a bucket, so the trailing
      # data argument is replaced with a bucket id.
      func.cmd_args = func.cmd_args[:-1]
      func.AddCmdArg(Argument('bucket_id', 'GLuint'))

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler.

    Emits the C++ that computes the size of the data payload for each
    command family.
    """
    # TODO(gman): Move this data to _FUNCTION_INFO?
    name = func.name
    if name.endswith("Immediate"):
      name = name[0:-9]
    if name in ('BufferData', 'BufferSubData'):
      file.Write("  uint32 data_size = size;\n")
    elif name in ('CompressedTexImage2D', 'CompressedTexSubImage2D'):
      file.Write("  uint32 data_size = imageSize;\n")
    elif name == 'CompressedTexSubImage2DBucket':
      file.Write("  Bucket* bucket = GetBucket(c.bucket_id);\n")
      file.Write("  uint32 data_size = bucket->size();\n")
      file.Write("  GLsizei imageSize = data_size;\n")
    elif name in ('TexImage2D', 'TexSubImage2D'):
      code = """  uint32 data_size;
  if (!GLES2Util::ComputeImageDataSize(
      width, height, format, type, unpack_alignment_, &data_size)) {
    return error::kOutOfBounds;
  }
"""
      file.Write(code)
    else:
      file.Write("// uint32 data_size = 0;  // TODO(gman): get correct size!\n")

  def WriteImmediateCmdGetTotalSize(self, func, file):
    """Overridden from TypeHandler."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    if func.name in ('BufferDataImmediate', 'BufferSubDataImmediate'):
      file.Write("    uint32 total_size = ComputeSize(_size);\n")
    elif func.name in ('CompressedTexImage2DImmediate',
                       'CompressedTexSubImage2DImmediate'):
      file.Write("    uint32 total_size = ComputeSize(_imageSize);\n")
    elif func.name in ('TexImage2DImmediate', 'TexSubImage2DImmediate'):
      file.Write(
          "    uint32 total_size = 0;  // TODO(gman): get correct size\n")

  def WriteImmediateCmdSizeTest(self, func, file):
    """Overridden from TypeHandler."""
    # TODO(gman): Move this data to _FUNCTION_INFO?
    if func.name in ('BufferDataImmediate', 'BufferSubDataImmediate'):
      file.Write("  uint32 total_size = cmd.ComputeSize(cmd.size);\n")
    elif func.name in ('CompressedTexImage2DImmediate',
                       'CompressedTexSubImage2DImmediate'):
      file.Write("  uint32 total_size = cmd.ComputeSize(cmd.imageSize);\n")
    elif func.name in ('TexImage2DImmediate', 'TexSubImage2DImmediate'):
      file.Write(
          "  uint32 total_size = 0;  // TODO(gman): get correct size\n")
    file.Write("  EXPECT_EQ(sizeof(cmd), total_size);\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void Init(%s) {\n" % func.MakeTypedCmdArgString("_"))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    SetHeader(total_size);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s) {\n" %
               func.MakeTypedCmdArgString("_", True))
    self.WriteImmediateCmdGetTotalSize(func, file)
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, total_size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler: emit a TODO placeholder only."""
    # TODO(gman): Remove this exception.
    file.Write("// TODO(gman): Implement test for %s\n" % func.name)

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler: emit a TODO placeholder only."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler: emit a TODO placeholder only."""
    file.Write("// TODO(gman): %s\n\n" % func.name)

  def WriteBucketServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    if func.name != 'CompressedTexSubImage2DBucket':
      # Bug fix: the base method is named WriteBucketServiceImplementation;
      # the previous call misspelled it ("WriteBucketServiceImplemenation"),
      # which raised AttributeError whenever this branch was taken.
      TypeHandler.WriteBucketServiceImplementation(self, func, file)
class BindHandler(TypeHandler):
  """Handler for glBind___ type functions.

  Bind commands come in two shapes: a single object-name argument (e.g.
  glBindVertexArrayOES) or a target plus an object name (e.g. glBindTexture).
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    # Cleanup: the unused "gen_func_names = {}" placeholder locals that used
    # to sit in both branches have been removed.
    if len(func.GetOriginalArgs()) == 1:
      # Bind functions with no target: the single argument is the name.
      valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}

TEST_F(%(test_name)s, %(name)sValidArgsNewId) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(kNewServiceId));
  EXPECT_CALL(*gl_, %(gl_gen_func_name)s(1, _))
     .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(kNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_type)sInfo(kNewClientId) != NULL);
}
"""
      self.WriteValidUnitTest(func, file, valid_test, {
          'resource_type': func.GetOriginalArgs()[0].resource_type,
          'gl_gen_func_name': func.GetInfo("gen_func"),
        })
    else:
      # Bind functions with both a target and a name.
      valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}

TEST_F(%(test_name)s, %(name)sValidArgsNewId) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(first_gl_arg)s, kNewServiceId));
  EXPECT_CALL(*gl_, %(gl_gen_func_name)s(1, _))
     .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(first_arg)s, kNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_type)sInfo(kNewClientId) != NULL);
}
"""
      self.WriteValidUnitTest(func, file, valid_test, {
          'first_arg': func.GetOriginalArgs()[0].GetValidArg(func, 0, 0),
          'first_gl_arg': func.GetOriginalArgs()[0].GetValidGLArg(func, 0, 0),
          'resource_type': func.GetOriginalArgs()[1].resource_type,
          'gl_gen_func_name': func.GetInfo("gen_func"),
        })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test)

  def WriteGLES2Implementation(self, func, file):
    """Writes the GLES2 Implementation.

    Only generated when the function can be auto-generated and is not
    explicitly opted out via 'impl_func' / 'impl_decl'.
    """
    impl_func = func.GetInfo('impl_func')
    impl_decl = func.GetInfo('impl_decl')
    if (func.can_auto_generate and
        (impl_func == None or impl_func == True) and
        (impl_decl == None or impl_decl == True)):
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      code = """  if (Is%(type)sReservedId(%(id)s)) {
    SetGLError(GL_INVALID_OPERATION, "%(name)s\", \"%(id)s reserved id");
    return;
  }
  if (Bind%(type)sHelper(%(arg_string)s)) {
    helper_->%(name)s(%(arg_string)s);
  }
  CheckGLError();
}

"""
      name_arg = None
      if len(func.GetOriginalArgs()) == 1:
        # Bind functions that have no target (like BindVertexArrayOES)
        name_arg = func.GetOriginalArgs()[0]
      else:
        # Bind functions that have both a target and a name (like BindTexture)
        name_arg = func.GetOriginalArgs()[1]
      file.Write(code % {
          'name': func.name,
          'arg_string': func.MakeOriginalArgString(""),
          'id': name_arg.name,
          'type': name_arg.resource_type,
          'lc_type': name_arg.resource_type.lower(),
        })

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    client_test = func.GetInfo('client_test')
    if client_test == False:
      return
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };
  Cmds expected;
  expected.cmd.Init(%(cmd_args)s);

  gl_->%(name)s(%(args)s);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  ClearCommands();
  gl_->%(name)s(%(args)s);
  EXPECT_TRUE(NoCommandsWritten());
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
      # Cleanup: a dead "count += 1" was removed here; enumerate() rebinds
      # count on every iteration, so the increment had no effect.
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    file.Write(code % {
          'name': func.name,
          'args': ", ".join(gl_arg_strings),
          'cmd_args': ", ".join(cmd_arg_strings),
        })
class GENnHandler(TypeHandler):
  """Handler for glGen___ type functions.

  Covers commands such as glGenTextures/glGenBuffers that create n new
  objects and return their ids through shared memory.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler: no extra cmd args are needed."""
    pass

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler."""
    # Overflow-checked n * sizeof(GLuint).
    code = """  uint32 data_size;
  if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code)

  def WriteHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  if (!%sHelper(n, %s)) {\n"
               "    return error::kInvalidArguments;\n"
               "  }\n" %
               (func.name, func.GetLastOriginalArg().name))

  def WriteImmediateHandlerImplementation(self, func, file):
    """Overridden from TypeHandler."""
    # Same as above but keyed on original_name (no "Immediate" suffix).
    file.Write("  if (!%sHelper(n, %s)) {\n"
               "    return error::kInvalidArguments;\n"
               "  }\n" %
               (func.original_name, func.GetLastOriginalArg().name))

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    log_code = ("""    GPU_CLIENT_LOG_CODE_BLOCK({
      for (GLsizei i = 0; i < n; ++i) {
        GPU_CLIENT_LOG("  " << i << ": " << %s[i]);
      }
    });""" % func.GetOriginalArgs()[1].name)
    args = {
        'log_code': log_code,
        'return_type': func.return_type,
        'name': func.original_name,
        'typed_args': func.MakeTypedOriginalArgString(""),
        'args': func.MakeOriginalArgString(""),
        'resource_types': func.GetInfo('resource_types'),
        'count_name': func.GetOriginalArgs()[0].name,
      }
    file.Write(
        "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
        args)
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    # Ids are allocated client-side, then registered via the Helper and sent
    # to the service with the Immediate variant.
    code = """  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GetIdHandler(id_namespaces::k%(resource_types)s)->
      MakeIds(this, 0, %(args)s);
  %(name)sHelper(%(args)s);
  helper_->%(name)sImmediate(%(args)s);
  helper_->CommandBufferHelper::Flush();
%(log_code)s
  CheckGLError();
}

"""
    file.Write(code % args)

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  GLuint ids[2] = { 0, };
  struct Cmds {
    %(name)sImmediate gen;
    GLuint data[2];
  };
  Cmds expected;
  expected.gen.Init(arraysize(ids), &ids[0]);
  expected.data[0] = k%(types)sStartId;
  expected.data[1] = k%(types)sStartId + 1;
  gl_->%(name)s(arraysize(ids), &ids[0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  EXPECT_EQ(k%(types)sStartId, ids[0]);
  EXPECT_EQ(k%(types)sStartId + 1, ids[1]);
}
"""
    file.Write(code % {
          'name': func.name,
          'types': func.GetInfo('resource_types'),
        })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(1, _))
      .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  GetSharedMemoryAs<GLuint*>()[0] = kNewClientId;
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_name)sInfo(kNewClientId) != NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
        'resource_name': func.GetInfo('resource_type'),
      })
    # NOTE(review): the "invalid" case reuses WriteValidUnitTest because the
    # command itself is well-formed; only the supplied client id is already
    # in use.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(_, _)).Times(0);
  GetSharedMemoryAs<GLuint*>()[0] = client_%(resource_name)s_id_;
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test, {
        'resource_name': func.GetInfo('resource_type').lower(),
      })

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(1, _))
      .WillOnce(SetArgumentPointee<1>(kNewServiceId));
  %(name)s* cmd = GetImmediateAs<%(name)s>();
  GLuint temp = kNewClientId;
  SpecializedSetup<%(name)s, 0>(true);
  cmd->Init(1, &temp);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(*cmd, sizeof(temp)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_name)sInfo(kNewClientId) != NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
        'resource_name': func.GetInfo('resource_type'),
      })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(_, _)).Times(0);
  %(name)s* cmd = GetImmediateAs<%(name)s>();
  SpecializedSetup<%(name)s, 0>(false);
  cmd->Init(1, &client_%(resource_name)s_id_);
  EXPECT_EQ(error::kInvalidArguments,
            ExecuteImmediateCmd(*cmd, sizeof(&client_%(resource_name)s_id_)));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test, {
        'resource_name': func.GetInfo('resource_type').lower(),
      })

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  static uint32 ComputeDataSize(GLsizei n) {\n")
    file.Write(
        "    return static_cast<uint32>(sizeof(GLuint) * n);  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize(GLsizei n) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) + ComputeDataSize(n));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void SetHeader(GLsizei n) {\n")
    file.Write("    header.SetCmdByTotalSize<ValueType>(ComputeSize(n));\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader(_n);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    # The id array is copied inline directly after the command struct.
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize(_n));\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize(_n);\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize(n);
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedOriginalArgString(""),
          "args": func.MakeOriginalArgString(""),
        })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  static GLuint ids[] = { 12, 23, 34, };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd, static_cast<GLsizei>(arraysize(ids)), ids);\n")
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(sizeof(cmd) +\n")
    file.Write("            RoundSizeToMultipleOfEntries(cmd.n * 4u),\n")
    file.Write("            cmd.header.size * 4u);\n")
    file.Write("  EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);\n");
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));\n")
    file.Write("  // TODO(gman): Check that ids were inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class CreateHandler(TypeHandler):
  """Handler for glCreate___ type functions (e.g. glCreateProgram)."""

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler."""
    # Create commands return the new object via a client-chosen id, so the
    # wire format carries an extra client_id argument.
    func.AddCmdArg(Argument("client_id", 'uint32'))

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s))
      .WillOnce(Return(kNewServiceId));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)skNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(Get%(resource_type)sInfo(kNewClientId) != NULL);
}
"""
    # A separator is only needed when the GL function itself takes arguments
    # before the appended client id.
    comma = ""
    if len(func.GetOriginalArgs()):
      comma =", "
    self.WriteValidUnitTest(func, file, valid_test, {
          'comma': comma,
          'resource_type': func.name[6:],  # strips the "Create" prefix
        })
    # NOTE(review): invalid args still parse as kNoError here; the failure is
    # surfaced through %(gl_error_test)s instead of a parse error.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)skNewClientId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, {
          'comma': comma,
        })

  def WriteHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  uint32 client_id = c.client_id;\n")
    file.Write("  if (!%sHelper(%s)) {\n" %
               (func.name, func.MakeCmdArgString("")))
    file.Write("    return error::kInvalidArguments;\n")
    file.Write("  }\n")

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    # The client allocates the id up front and returns it immediately; the
    # service creates the matching object asynchronously.
    file.Write("  GLuint client_id;\n")
    file.Write(
        "  GetIdHandler(id_namespaces::kProgramsAndShaders)->\n")
    file.Write("      MakeIds(this, 0, 1, &client_id);\n")
    file.Write("  helper_->%s(%s);\n" %
               (func.name, func.MakeCmdArgString("")))
    file.Write('  GPU_CLIENT_LOG("returned " << client_id);\n')
    file.Write("  CheckGLError();\n")
    file.Write("  return client_id;\n")
    file.Write("}\n")
    file.Write("\n")
class DeleteHandler(TypeHandler):
  """Handler for glDelete___ single resource type functions."""

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler: nothing is auto-generated for the
    service side of these commands.
    """
    pass

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    id_arg = func.GetOriginalArgs()[-1]
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    for original_arg in func.GetOriginalArgs():
      original_arg.WriteClientSideValidationCode(file, func)
    file.Write("  GPU_CLIENT_DCHECK(%s != 0);\n" % id_arg.name)
    file.Write("  %sHelper(%s);\n" % (func.original_name, id_arg.name))
    file.Write("  CheckGLError();\n")
    file.Write("}\n")
    file.Write("\n")
class DELnHandler(TypeHandler):
  """Handler for glDelete___ type functions.

  Covers the plural delete commands (e.g. glDeleteTextures) that take a
  count and an array of client ids.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler."""
    # Overflow-checked n * sizeof(GLuint).
    code = """  uint32 data_size;
  if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code)

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  GLuint ids[2] = { k%(types)sStartId, k%(types)sStartId + 1 };
  struct Cmds {
    %(name)sImmediate del;
    GLuint data[2];
  };
  Cmds expected;
  expected.del.Init(arraysize(ids), &ids[0]);
  expected.data[0] = k%(types)sStartId;
  expected.data[1] = k%(types)sStartId + 1;
  gl_->%(name)s(arraysize(ids), &ids[0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
    file.Write(code % {
          'name': func.name,
          'types': func.GetInfo('resource_types'),
        })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
      .Times(1);
  GetSharedMemoryAs<GLuint*>()[0] = client_%(resource_name)s_id_;
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(
      Get%(upper_resource_name)sInfo(client_%(resource_name)s_id_) == NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
          'resource_name': func.GetInfo('resource_type').lower(),
          'upper_resource_name': func.GetInfo('resource_type'),
        })
    # NOTE(review): the "invalid" case reuses WriteValidUnitTest because the
    # command is well-formed; only the client id is unknown, which deletes
    # nothing and reports no error.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  GetSharedMemoryAs<GLuint*>()[0] = kInvalidClientId;
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test)

  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
      .Times(1);
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  SpecializedSetup<%(name)s, 0>(true);
  cmd.Init(1, &client_%(resource_name)s_id_);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(client_%(resource_name)s_id_)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_TRUE(
      Get%(upper_resource_name)sInfo(client_%(resource_name)s_id_) == NULL);
}
"""
    self.WriteValidUnitTest(func, file, valid_test, {
          'resource_name': func.GetInfo('resource_type').lower(),
          'upper_resource_name': func.GetInfo('resource_type'),
        })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  SpecializedSetup<%(name)s, 0>(false);
  GLuint temp = kInvalidClientId;
  cmd.Init(1, &temp);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(temp)));
}
"""
    self.WriteValidUnitTest(func, file, invalid_test)

  def WriteHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  %sHelper(n, %s);\n" %
               (func.name, func.GetLastOriginalArg().name))

  def WriteImmediateHandlerImplementation (self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  %sHelper(n, %s);\n" %
               (func.original_name, func.GetLastOriginalArg().name))

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    impl_decl = func.GetInfo('impl_decl')
    if impl_decl == None or impl_decl == True:
      args = {
          'return_type': func.return_type,
          'name': func.original_name,
          'typed_args': func.MakeTypedOriginalArgString(""),
          'args': func.MakeOriginalArgString(""),
          'resource_type': func.GetInfo('resource_type').lower(),
          'count_name': func.GetOriginalArgs()[0].name,
        }
      file.Write(
          "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
          args)
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      file.Write("""  GPU_CLIENT_LOG_CODE_BLOCK({
    for (GLsizei i = 0; i < n; ++i) {
      GPU_CLIENT_LOG("  " << i << ": " << %s[i]);
    }
  });
""" % func.GetOriginalArgs()[1].name)
      # Debug-only check that no id in the array is 0.
      file.Write("""  GPU_CLIENT_DCHECK_CODE_BLOCK({
    for (GLsizei i = 0; i < n; ++i) {
      GPU_DCHECK(%s[i] != 0);
    }
  });
""" % func.GetOriginalArgs()[1].name)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      code = """  %(name)sHelper(%(args)s);
  CheckGLError();
}

"""
      file.Write(code % args)

  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  static uint32 ComputeDataSize(GLsizei n) {\n")
    file.Write(
        "    return static_cast<uint32>(sizeof(GLuint) * n);  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize(GLsizei n) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) + ComputeDataSize(n));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("  void SetHeader(GLsizei n) {\n")
    file.Write("    header.SetCmdByTotalSize<ValueType>(ComputeSize(n));\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader(_n);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    # The id array is copied inline directly after the command struct.
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize(_n));\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize(_n);\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")

  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize(n);
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }

"""
    file.Write(code % {
          "name": func.name,
          "typed_args": func.MakeTypedOriginalArgString(""),
          "args": func.MakeOriginalArgString(""),
        })

  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  static GLuint ids[] = { 12, 23, 34, };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd, static_cast<GLsizei>(arraysize(ids)), ids);\n")
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(sizeof(cmd) +\n")
    file.Write("            RoundSizeToMultipleOfEntries(cmd.n * 4u),\n")
    file.Write("            cmd.header.size * 4u);\n")
    file.Write("  EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);\n");
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));\n")
    file.Write("  // TODO(gman): Check that ids were inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class GETnHandler(TypeHandler):
  """Handler for GETn for glGetBooleanv, glGetFloatv, ... type functions.

  These commands return a variable number of values through a shared-memory
  Result structure.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def AddImmediateFunction(self, generator, func):
    """Overridden from TypeHandler: no immediate variant is added."""
    pass

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    last_arg = func.GetLastOriginalArg()
    all_but_last_args = func.GetOriginalArgs()[:-1]
    for arg in all_but_last_args:
      arg.WriteGetCode(file)
    # Map the shared-memory Result buffer, sized for the number of values
    # this pname returns.
    code = """  typedef %(func_name)s::Result Result;
  GLsizei num_values = 0;
  GetNumValuesReturnedForGLGet(pname, &num_values);
  Result* result = GetSharedMemoryAs<Result*>(
      c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
  %(last_arg_type)s params = result ? result->GetData() : NULL;
"""
    file.Write(code % {
        'last_arg_type': last_arg.type,
        'func_name': func.name,
      })
    func.WriteHandlerValidation(file)
    code = """  // Check that the client initialized the result.
  if (result->size != 0) {
    return error::kInvalidArguments;
  }
  CopyRealGLErrorsToWrapper();
"""
    file.Write(code)
    func.WriteHandlerImplementation(file)
    # Only mark the result valid when the GL call itself succeeded.
    code = """  GLenum error = glGetError();
  if (error == GL_NO_ERROR) {
    result->SetNumResults(num_values);
  } else {
    SetGLError(error, "", "");
  }
  return error::kNoError;
}

"""
    file.Write(code)

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler."""
    impl_decl = func.GetInfo('impl_decl')
    if impl_decl == None or impl_decl == True:
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      for arg in func.GetOriginalArgs():
        arg.WriteClientSideValidationCode(file, func)
      all_but_last_args = func.GetOriginalArgs()[:-1]
      # arg_string excludes the output pointer; all_arg_string includes it.
      arg_string = (
          ", ".join(["%s" % arg.name for arg in all_but_last_args]))
      all_arg_string = (
          ", ".join(["%s" % arg.name for arg in func.GetOriginalArgs()]))
      self.WriteTraceEvent(func, file)
      # Try the client-side cache first; otherwise round-trip through the
      # service and copy the shared-memory result out.
      code = """  if (%(func_name)sHelper(%(all_arg_string)s)) {
    return;
  }
  typedef %(func_name)s::Result Result;
  Result* result = GetResultAs<Result*>();
  if (!result) {
    return;
  }
  result->SetNumResults(0);
  helper_->%(func_name)s(%(arg_string)s,
      GetResultShmId(), GetResultShmOffset());
  WaitForCmd();
  result->CopyResult(params);
  GPU_CLIENT_LOG_CODE_BLOCK({
    for (int32 i = 0; i < result->GetNumResults(); ++i) {
      GPU_CLIENT_LOG("  " << i << ": " << result->GetData()[i]);
    }
  });
  CheckGLError();
}

"""
      file.Write(code % {
          'func_name': func.name,
          'arg_string': arg_string,
          'all_arg_string': all_arg_string,
        })

  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 Implementation unit test."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };
  typedef %(name)s::Result Result;
  Result::Type result = 0;
  Cmds expected;
  ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
  expected.cmd.Init(%(cmd_args)s, result1.id, result1.offset);
  EXPECT_CALL(*command_buffer(), OnFlush())
      .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
      .RetiresOnSaturation();
  gl_->%(name)s(%(args)s, &result);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  EXPECT_EQ(static_cast<Result::Type>(1), result);
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()[0:-2]):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
    # The first argument is overridden with the literal '123' -- presumably
    # any value serializes the same for this memcmp-based check; TODO confirm.
    cmd_arg_strings[0] = '123'
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    gl_arg_strings[0] = '123'
    file.Write(code % {
          'name': func.name,
          'args': ", ".join(gl_arg_strings),
          'cmd_args': ", ".join(cmd_arg_strings),
        })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler."""
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  SpecializedSetup<%(name)s, 0>(true);
  typedef %(name)s::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(local_gl_args)s));
  result->size = 0;
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(
                %(valid_pname)s),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    gl_arg_strings = []
    valid_pname = ''
    for count, arg in enumerate(func.GetOriginalArgs()[:-1]):
      arg_value = arg.GetValidGLArg(func, count, 0)
      gl_arg_strings.append(arg_value)
      # Remember the pname value so the test can ask how many values it
      # is expected to return.
      if arg.name == 'pname':
        valid_pname = arg_value
    if func.GetInfo('gl_test_func') == 'glGetIntegerv':
      gl_arg_strings.append("_")
    else:
      gl_arg_strings.append("result->GetData()")
    self.WriteValidUnitTest(func, file, valid_test, {
        'local_gl_args': ", ".join(gl_arg_strings),
        'valid_pname': valid_pname,
      })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s::Result* result =
      static_cast<%(name)s::Result*>(shared_memory_address_);
  result->size = 0;
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));
  EXPECT_EQ(0u, result->size);%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test)
class PUTHandler(TypeHandler):
  """Handler for glTexParameter_v, glVertexAttrib_v functions.

  These commands carry a small fixed-size blob of values -- func.info.count
  elements of func.info.data_type -- either through shared memory or, for
  the Immediate variant, inline after the command header.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteServiceUnitTest(self, func, file):
    """Writes the service unit test for a command.

    The data blob is seeded through shared memory with data_value
    (defaulting to '0').
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  GetSharedMemoryAs<%(data_type)s*>()[0] = %(data_value)s;
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    extra = {
      'data_type': func.GetInfo('data_type'),
      'data_value': func.GetInfo('data_value') or '0',
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  GetSharedMemoryAs<%(data_type)s*>()[0] = %(data_value)s;
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, extra)
  def WriteImmediateServiceUnitTest(self, func, file):
    """Writes the service unit test for the Immediate form of a command.

    The data rides inline after the command, so the mock expects a pointer
    computed with ImmediateDataAddress(&cmd).
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(%(gl_args)s,
          reinterpret_cast<%(data_type)s*>(ImmediateDataAddress(&cmd))));
  SpecializedSetup<%(name)s, 0>(true);
  %(data_type)s temp[%(data_count)s] = { %(data_value)s, };
  cmd.Init(%(gl_args)s, &temp[0]);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(temp)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    gl_arg_strings = []
    gl_any_strings = []
    # All original args except the trailing data pointer; the "_" wildcards
    # are used by the invalid-args template below.
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
      gl_any_strings.append("_")
    extra = {
      'data_type': func.GetInfo('data_type'),
      'data_count': func.GetInfo('count'),
      'data_value': func.GetInfo('data_value') or '0',
      'gl_args': ", ".join(gl_arg_strings),
      'gl_any_args': ", ".join(gl_any_strings),
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
    # %(all_but_last_args)s is filled in by WriteInvalidUnitTest.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_any_args)s, _)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(data_type)s temp[%(data_count)s] = { %(data_value)s, };
  cmd.Init(%(all_but_last_args)s, &temp[0]);
  EXPECT_EQ(error::%(parse_result)s,
            ExecuteImmediateCmd(cmd, sizeof(temp)));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, extra)
  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler: size check for one fixed-count blob."""
    code = """  uint32 data_size;
  if (!ComputeDataSize(1, sizeof(%s), %d, &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code % (func.info.data_type, func.info.count))
    # Immediate commands must also verify the blob fits in the data that
    # actually arrived with the command.
    if func.is_immediate:
      file.Write("  if (data_size > immediate_data_size) {\n")
      file.Write("    return error::kOutOfBounds;\n")
      file.Write("  }\n")
  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler: client-side entry that logs the values
    and forwards to the Immediate helper."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    last_arg_name = func.GetLastOriginalArg().name
    # Log each element of the fixed-size value array.
    values_str = ' << ", " << '.join(
        ["%s[%d]" % (last_arg_name, ndx) for ndx in range(0, func.info.count)])
    file.Write('  GPU_CLIENT_LOG("values: " << %s);\n' % values_str)
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    file.Write("  helper_->%sImmediate(%s);\n" %
               (func.name, func.MakeOriginalArgString("")))
    file.Write("  CheckGLError();\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 implementation unit test: the expected command is
    the Immediate form followed by the inline data array."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)sImmediate cmd;
    %(type)s data[%(count)d];
  };
  Cmds expected;
  for (int jj = 0; jj < %(count)d; ++jj) {
    expected.data[jj] = static_cast<%(type)s>(jj);
  }
  expected.cmd.Init(%(cmd_args)s, &expected.data[0]);
  gl_->%(name)s(%(args)s, &expected.data[0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()[0:-2]):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    file.Write(code % {
      'name': func.name,
      'type': func.GetInfo('data_type'),
      'count': func.GetInfo('count'),
      'args': ", ".join(gl_arg_strings),
      'cmd_args': ", ".join(cmd_arg_strings),
    })
  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler: emits ComputeDataSize/ComputeSize for a
    fixed-count inline blob."""
    file.Write("  static uint32 ComputeDataSize() {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(%s) * %d);  // NOLINT\n" %
               (func.info.data_type, func.info.count))
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize() {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write(
        "        sizeof(ValueType) + ComputeDataSize());  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler: the header size covers the inline data."""
    file.Write("  void SetHeader() {\n")
    file.Write(
        "    header.SetCmdByTotalSize<ValueType>(ComputeSize());\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler: Init copies the caller's array into the
    space immediately after the command."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader();\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize());\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler: Set() forwards to Init and returns the
    address just past this command plus its inline data."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize();\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler: emits the GLES2CmdHelper wrapper."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize();
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }
"""
    file.Write(code % {
      "name": func.name,
      "typed_args": func.MakeTypedOriginalArgString(""),
      "args": func.MakeOriginalArgString(""),
    })
  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler: emits a wire-format test covering header
    size, each cmd field, and total bytes written."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  const int kSomeBaseValueToTestWith = 51;\n")
    file.Write("  static %s data[] = {\n" % func.info.data_type)
    for v in range(0, func.info.count):
      file.Write("    static_cast<%s>(kSomeBaseValueToTestWith + %d),\n" %
                 (func.info.data_type, v))
    file.Write("  };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd")
    args = func.GetCmdArgs()
    # Each cmd field is seeded with value + 11 and verified below.
    for value, arg in enumerate(args):
      file.Write(",\n      static_cast<%s>(%d)" % (arg.type, value + 11))
    file.Write(",\n      data);\n")
    args = func.GetCmdArgs()
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(sizeof(cmd) +\n")
    file.Write("            RoundSizeToMultipleOfEntries(sizeof(data)),\n")
    file.Write("            cmd.header.size * 4u);\n")
    for value, arg in enumerate(args):
      file.Write("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
                 (arg.type, value + 11, arg.name))
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(sizeof(data)));\n")
    file.Write("  // TODO(gman): Check that data was inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class PUTnHandler(TypeHandler):
  """Handler for PUTn 'glUniform__v' type functions.

  Like PUTHandler, except the data blob holds a runtime 'count' groups of
  func.info.count elements each, so every size computation takes count.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    Writes the base tests, then an extra test where the command requests
    more elements (5) than GL is expected to receive (3).
    """
    TypeHandler.WriteServiceUnitTest(self, func, file)
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgsCountTooLarge) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    gl_arg_strings = []
    arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()):
      # hardcoded to match unit tests.
      if count == 0:
        # the location of the second element of the 2nd uniform.
        # defined in GLES2DecoderBase::SetupShaderForUniform
        gl_arg_strings.append("3")
        arg_strings.append("ProgramManager::MakeFakeLocation(1, 1)")
      elif count == 1:
        # the number of elements that gl will be called with.
        gl_arg_strings.append("3")
        # the number of elements requested in the command.
        arg_strings.append("5")
      else:
        gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
        arg_strings.append(arg.GetValidArg(func, count, 0))
    extra = {
      'gl_args': ", ".join(gl_arg_strings),
      'args': ", ".join(arg_strings),
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
  def WriteImmediateServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    Immediate-form tests: the temp array is count * 2 elements, matching
    the default count of 2 used by the generated tests.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(
      *gl_,
      %(gl_func_name)s(%(gl_args)s,
          reinterpret_cast<%(data_type)s*>(ImmediateDataAddress(&cmd))));
  SpecializedSetup<%(name)s, 0>(true);
  %(data_type)s temp[%(data_count)s * 2] = { 0, };
  cmd.Init(%(args)s, &temp[0]);
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(cmd, sizeof(temp)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    gl_arg_strings = []
    gl_any_strings = []
    arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidGLArg(func, count, 0))
      gl_any_strings.append("_")
      arg_strings.append(arg.GetValidArg(func, count, 0))
    extra = {
      'data_type': func.GetInfo('data_type'),
      'data_count': func.GetInfo('count'),
      'args': ", ".join(arg_strings),
      'gl_args': ", ".join(gl_arg_strings),
      'gl_any_args': ", ".join(gl_any_strings),
    }
    self.WriteValidUnitTest(func, file, valid_test, extra)
    # %(all_but_last_args)s is filled in by WriteInvalidUnitTest.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  %(name)s& cmd = *GetImmediateAs<%(name)s>();
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_any_args)s, _)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(data_type)s temp[%(data_count)s * 2] = { 0, };
  cmd.Init(%(all_but_last_args)s, &temp[0]);
  EXPECT_EQ(error::%(parse_result)s,
            ExecuteImmediateCmd(cmd, sizeof(temp)));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, extra)
  def WriteGetDataSizeCode(self, func, file):
    """Overridden from TypeHandler: size check scaled by the runtime count."""
    code = """  uint32 data_size;
  if (!ComputeDataSize(count, sizeof(%s), %d, &data_size)) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code % (func.info.data_type, func.info.count))
    # Immediate commands must also verify the blob fits in the data that
    # actually arrived with the command.
    if func.is_immediate:
      file.Write("  if (data_size > immediate_data_size) {\n")
      file.Write("    return error::kOutOfBounds;\n")
      file.Write("  }\n")
  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler: client-side entry that logs each of the
    count groups of values and forwards to the Immediate helper."""
    file.Write("%s GLES2Implementation::%s(%s) {\n" %
               (func.return_type, func.original_name,
                func.MakeTypedOriginalArgString("")))
    file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
    func.WriteDestinationInitalizationValidation(file)
    self.WriteClientGLCallLog(func, file)
    last_arg_name = func.GetLastOriginalArg().name
    file.Write("""  GPU_CLIENT_LOG_CODE_BLOCK({
    for (GLsizei i = 0; i < count; ++i) {
""")
    # One log line per group: element ndx of group i lives at
    # [ndx + i * func.info.count].
    values_str = ' << ", " << '.join(
        ["%s[%d + i * %d]" % (
            last_arg_name, ndx, func.info.count) for ndx in range(
                0, func.info.count)])
    file.Write('      GPU_CLIENT_LOG("  " << i << ": " << %s);\n' % values_str)
    file.Write("    }\n  });\n")
    for arg in func.GetOriginalArgs():
      arg.WriteClientSideValidationCode(file, func)
    file.Write("  helper_->%sImmediate(%s);\n" %
               (func.name, func.MakeOriginalArgString("")))
    file.Write("  CheckGLError();\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Writes the GLES2 implementation unit test with two groups of data."""
    code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)sImmediate cmd;
    %(type)s data[2][%(count)d];
  };
  Cmds expected;
  for (int ii = 0; ii < 2; ++ii) {
    for (int jj = 0; jj < %(count)d; ++jj) {
      expected.data[ii][jj] = static_cast<%(type)s>(ii * %(count)d + jj);
    }
  }
  expected.cmd.Init(%(cmd_args)s, &expected.data[0][0]);
  gl_->%(name)s(%(args)s, &expected.data[0][0]);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
"""
    cmd_arg_strings = []
    for count, arg in enumerate(func.GetCmdArgs()[0:-2]):
      cmd_arg_strings.append(arg.GetValidClientSideCmdArg(func, count, 0))
    gl_arg_strings = []
    for count, arg in enumerate(func.GetOriginalArgs()[0:-1]):
      gl_arg_strings.append(arg.GetValidClientSideArg(func, count, 0))
    file.Write(code % {
      'name': func.name,
      'type': func.GetInfo('data_type'),
      'count': func.GetInfo('count'),
      'args': ", ".join(gl_arg_strings),
      'cmd_args': ", ".join(cmd_arg_strings),
    })
  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler: ComputeDataSize/ComputeSize take count."""
    file.Write("  static uint32 ComputeDataSize(GLsizei count) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(%s) * %d * count);  // NOLINT\n" %
               (func.info.data_type, func.info.count))
    file.Write("  }\n")
    file.Write("\n")
    file.Write("  static uint32 ComputeSize(GLsizei count) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write(
        "        sizeof(ValueType) + ComputeDataSize(count));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler: header size depends on count."""
    file.Write("  void SetHeader(GLsizei count) {\n")
    file.Write(
        "    header.SetCmdByTotalSize<ValueType>(ComputeSize(count));\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler: Init copies count groups of data inline
    (relies on a cmd arg literally named 'count')."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void Init(%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_"),
                last_arg.type, last_arg.name))
    file.Write("    SetHeader(_count);\n")
    args = func.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("    memcpy(ImmediateDataAddress(this),\n")
    file.Write("           _%s, ComputeDataSize(_count));\n" % last_arg.name)
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler: Set() forwards to Init and returns the
    address just past this command plus its count-scaled data."""
    last_arg = func.GetLastOriginalArg()
    copy_args = func.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s, %s _%s) {\n" %
               (func.MakeTypedCmdArgString("_", True),
                last_arg.type, last_arg.name))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _%s);\n" %
               (copy_args, last_arg.name))
    file.Write("    const uint32 size = ComputeSize(_count);\n")
    file.Write("    return NextImmediateCmdAddressTotalSize<ValueType>("
               "cmd, size);\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler: emits the GLES2CmdHelper wrapper
    (relies on an original arg literally named 'count')."""
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 size = gles2::%(name)s::ComputeSize(count);
    gles2::%(name)s* c =
        GetImmediateCmdSpaceTotalSize<gles2::%(name)s>(size);
    if (c) {
      c->Init(%(args)s);
    }
  }
"""
    file.Write(code % {
      "name": func.name,
      "typed_args": func.MakeTypedOriginalArgString(""),
      "args": func.MakeOriginalArgString(""),
    })
  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler: wire-format test with two data groups."""
    file.Write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
    file.Write("  const int kSomeBaseValueToTestWith = 51;\n")
    file.Write("  static %s data[] = {\n" % func.info.data_type)
    for v in range(0, func.info.count * 2):
      file.Write("    static_cast<%s>(kSomeBaseValueToTestWith + %d),\n" %
                 (func.info.data_type, v))
    file.Write("  };\n")
    file.Write("  %s& cmd = *GetBufferAs<%s>();\n" % (func.name, func.name))
    file.Write("  const GLsizei kNumElements = 2;\n")
    file.Write("  const size_t kExpectedCmdSize =\n")
    file.Write("      sizeof(cmd) + kNumElements * sizeof(%s) * %d;\n" %
               (func.info.data_type, func.info.count))
    file.Write("  void* next_cmd = cmd.Set(\n")
    file.Write("      &cmd")
    args = func.GetCmdArgs()
    # Each cmd field is seeded with value + 1 and verified below.
    for value, arg in enumerate(args):
      file.Write(",\n      static_cast<%s>(%d)" % (arg.type, value + 1))
    file.Write(",\n      data);\n")
    args = func.GetCmdArgs()
    file.Write("  EXPECT_EQ(static_cast<uint32>(%s::kCmdId),\n" % func.name)
    file.Write("            cmd.header.command);\n")
    file.Write("  EXPECT_EQ(kExpectedCmdSize, cmd.header.size * 4u);\n")
    for value, arg in enumerate(args):
      file.Write("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);\n" %
                 (arg.type, value + 1, arg.name))
    file.Write("  CheckBytesWrittenMatchesExpectedSize(\n")
    file.Write("      next_cmd, sizeof(cmd) +\n")
    file.Write("      RoundSizeToMultipleOfEntries(sizeof(data)));\n")
    file.Write("  // TODO(gman): Check that data was inserted;\n")
    file.Write("}\n")
    file.Write("\n")
class PUTXnHandler(TypeHandler):
  """Handler for glUniform?f style functions.

  These commands take their values as trailing scalar arguments
  (e.g. glUniform2f(location, x, y)); the generated service handler packs
  the scalars into a temporary array and forwards to the ?v variant.
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def WriteHandlerImplementation(self, func, file):
    """Overridden from TypeHandler.

    Emits service code that copies the trailing 'count' scalar args into a
    temp array and calls Do<Name>v(location, 1, &temp[0]).
    """
    code = """  %(type)s temp[%(count)s] = { %(values)s};
  Do%(name)sv(%(location)s, 1, &temp[0]);
"""
    values = ""
    args = func.GetOriginalArgs()
    count = int(func.GetInfo('count'))
    # The last 'count' original args are the scalar values.
    for ii in range(count):
      values += "%s, " % args[len(args) - count + ii].name
    file.Write(code % {
      'name': func.name,
      'count': func.GetInfo('count'),
      'type': func.GetInfo('data_type'),
      'location': args[0].name,
      'values': values,
    })

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    The mock expectations are set on the ?v variant, since that is what the
    handler implementation above actually calls.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(name)sv(%(local_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    args = func.GetOriginalArgs()
    local_args = "%s, 1, _" % args[0].GetValidGLArg(func, 0, 0)
    self.WriteValidUnitTest(func, file, valid_test, {
      'name': func.name,
      'count': func.GetInfo('count'),
      'local_args': local_args,
    })
    # BUG FIX: the EXPECT_CALL below previously read
    #   EXPECT_CALL(*gl_, %(name)sv(_, _, _).Times(0);
    # i.e. it was missing the parenthesis closing EXPECT_CALL, so the
    # generated invalid-args tests did not compile.
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(name)sv(_, _, _)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    # NOTE(review): the valid test passes func.name while this passes
    # func.GetInfo('name'); preserved as-is -- confirm they are equivalent
    # for all functions routed through this handler.
    self.WriteInvalidUnitTest(func, file, invalid_test, {
      'name': func.GetInfo('name'),
      'count': func.GetInfo('count'),
    })
class GLcharHandler(CustomHandler):
  """Handler for functions that pass a single string .

  The string travels inline after the command (Immediate form) together with
  an explicit _data_size; there is no trailing NUL in the transferred data.
  """
  def __init__(self):
    CustomHandler.__init__(self)
  def WriteImmediateCmdComputeSize(self, func, file):
    """Overridden from TypeHandler: total size is header plus string bytes."""
    file.Write("  static uint32 ComputeSize(uint32 data_size) {\n")
    file.Write("    return static_cast<uint32>(\n")
    file.Write("        sizeof(ValueType) + data_size);  // NOLINT\n")
    file.Write("  }\n")
  def WriteImmediateCmdSetHeader(self, func, file):
    """Overridden from TypeHandler: header sized by the string length."""
    code = """
  void SetHeader(uint32 data_size) {
    header.SetCmdBySize<ValueType>(data_size);
  }
"""
    file.Write(code)
  def WriteImmediateCmdInit(self, func, file):
    """Overridden from TypeHandler: Init stores each cmd field then memcpys
    the string bytes after the command."""
    last_arg = func.GetLastOriginalArg()
    args = func.GetCmdArgs()
    set_code = []
    for arg in args:
      set_code.append("    %s = _%s;" % (arg.name, arg.name))
    code = """
  void Init(%(typed_args)s, uint32 _data_size) {
    SetHeader(_data_size);
%(set_code)s
    memcpy(ImmediateDataAddress(this), _%(last_arg)s, _data_size);
  }
"""
    file.Write(code % {
      "typed_args": func.MakeTypedOriginalArgString("_"),
      "set_code": "\n".join(set_code),
      "last_arg": last_arg.name
    })
  def WriteImmediateCmdSet(self, func, file):
    """Overridden from TypeHandler: Set() forwards to Init and returns the
    address just past the command plus its string data."""
    last_arg = func.GetLastOriginalArg()
    file.Write("  void* Set(void* cmd%s, uint32 _data_size) {\n" %
               func.MakeTypedOriginalArgString("_", True))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s, _data_size);\n" %
               func.MakeOriginalArgString("_"))
    file.Write("    return NextImmediateCmdAddress<ValueType>("
               "cmd, _data_size);\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteImmediateCmdHelper(self, func, file):
    """Overridden from TypeHandler: emits the GLES2CmdHelper wrapper.

    NOTE(review): the emitted strlen(name) assumes the function's string
    parameter is literally named 'name' -- confirm for all users.
    """
    code = """  void %(name)s(%(typed_args)s) {
    const uint32 data_size = strlen(name);
    gles2::%(name)s* c = GetImmediateCmdSpace<gles2::%(name)s>(data_size);
    if (c) {
      c->Init(%(args)s, data_size);
    }
  }
"""
    file.Write(code % {
      "name": func.name,
      "typed_args": func.MakeTypedOriginalArgString(""),
      "args": func.MakeOriginalArgString(""),
    })
  def WriteImmediateFormatTest(self, func, file):
    """Overridden from TypeHandler: wire-format test using a fixed test
    string; each non-data cmd field is seeded with value + 11."""
    init_code = []
    check_code = []
    all_but_last_arg = func.GetCmdArgs()[:-1]
    for value, arg in enumerate(all_but_last_arg):
      init_code.append("      static_cast<%s>(%d)," % (arg.type, value + 11))
    for value, arg in enumerate(all_but_last_arg):
      check_code.append("  EXPECT_EQ(static_cast<%s>(%d), cmd.%s);" %
                        (arg.type, value + 11, arg.name))
    code = """
TEST_F(GLES2FormatTest, %(func_name)s) {
  %(func_name)s& cmd = *GetBufferAs<%(func_name)s>();
  static const char* const test_str = \"test string\";
  void* next_cmd = cmd.Set(
      &cmd,
%(init_code)s
      test_str,
      strlen(test_str));
  EXPECT_EQ(static_cast<uint32>(%(func_name)s::kCmdId),
            cmd.header.command);
  EXPECT_EQ(sizeof(cmd) +
            RoundSizeToMultipleOfEntries(strlen(test_str)),
            cmd.header.size * 4u);
  EXPECT_EQ(static_cast<char*>(next_cmd),
            reinterpret_cast<char*>(&cmd) + sizeof(cmd) +
            RoundSizeToMultipleOfEntries(strlen(test_str)));
%(check_code)s
  EXPECT_EQ(static_cast<uint32>(strlen(test_str)), cmd.data_size);
  EXPECT_EQ(0, memcmp(test_str, ImmediateDataAddress(&cmd), strlen(test_str)));
  CheckBytesWritten(
      next_cmd,
      sizeof(cmd) + RoundSizeToMultipleOfEntries(strlen(test_str)),
      sizeof(cmd) + strlen(test_str));
}
"""
    file.Write(code % {
      'func_name': func.name,
      'init_code': "\n".join(init_code),
      'check_code': "\n".join(check_code),
    })
class GLcharNHandler(CustomHandler):
  """Handler for functions that pass a single string with an optional len.

  The string is transferred through a bucket instead of inline data, so the
  wire command carries only a bucket id.
  """
  def __init__(self):
    CustomHandler.__init__(self)
  def InitFunction(self, func):
    """Overridden from TypeHandler: replaces all cmd args with a bucket_id."""
    func.cmd_args = []
    func.AddCmdArg(Argument('bucket_id', 'GLuint'))
  def AddImmediateFunction(self, generator, func):
    """Overridden from TypeHandler: no Immediate variant for bucket strings."""
    pass
  def AddBucketFunction(self, generator, func):
    """Overridden from TypeHandler: the base command already uses a bucket."""
    pass
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler.

    Emits a handler that reads the string out of the named bucket and calls
    the GL function with it.  NOTE(review): the first GL argument is the
    literal 0 -- confirm this is correct for every function routed here.
    """
    file.Write("""error::Error GLES2DecoderImpl::Handle%(name)s(
    uint32 immediate_data_size, const gles2::%(name)s& c) {
  GLuint bucket_id = static_cast<GLuint>(c.%(bucket_id)s);
  Bucket* bucket = GetBucket(bucket_id);
  if (!bucket || bucket->size() == 0) {
    return error::kInvalidArguments;
  }
  std::string str;
  if (!bucket->GetAsString(&str)) {
    return error::kInvalidArguments;
  }
  %(gl_func_name)s(0, str.c_str());
  return error::kNoError;
}
""" % {
      'name': func.name,
      'gl_func_name': func.GetGLFunctionName(),
      'bucket_id': func.cmd_args[0].name,
    })
class IsHandler(TypeHandler):
  """Handler for glIs____ type and glGetError functions.

  These return a small scalar; the command carries a shared-memory slot
  (result_shm_id/result_shm_offset) that the service writes the value into.
  """
  def __init__(self):
    TypeHandler.__init__(self)
  def InitFunction(self, func):
    """Overridden from TypeHandler: appends the result shm args and defaults
    the result type to a single uint32."""
    func.AddCmdArg(Argument("result_shm_id", 'uint32'))
    func.AddCmdArg(Argument("result_shm_offset", 'uint32'))
    if func.GetInfo('result') == None:
      func.AddInfo('result', ['uint32'])
  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    Emits a valid-args test, the standard invalid-args tests, and a third
    test covering bad shared-memory id/offset.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s));
  SpecializedSetup<%(name)s, 0>(true);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)sshared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    # glGetError takes no args, in which case no separator is needed before
    # the shm id/offset.
    comma = ""
    if len(func.GetOriginalArgs()):
      comma =", "
    self.WriteValidUnitTest(func, file, valid_test, {
      'comma': comma,
    })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)sshared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::%(parse_result)s, ExecuteCmd(cmd));%(gl_error_test)s
}
"""
    self.WriteInvalidUnitTest(func, file, invalid_test, {
      'comma': comma,
    })
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s)).Times(0);
  SpecializedSetup<%(name)s, 0>(false);
  %(name)s cmd;
  cmd.Init(%(args)s%(comma)skInvalidSharedMemoryId, shared_memory_offset_);
  EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
  cmd.Init(%(args)s%(comma)sshared_memory_id_, kInvalidSharedMemoryOffset);
  EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
}
"""
    # WriteValidUnitTest (not WriteInvalidUnitTest) is used on purpose: the
    # command args themselves are valid, only the shm id/offset are bad.
    self.WriteValidUnitTest(func, file, invalid_test, {
      'comma': comma,
    })
  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler: emits a handler that resolves the result
    slot in shared memory, validates args, and stores the GL return value."""
    file.Write(
        "error::Error GLES2DecoderImpl::Handle%s(\n" % func.name)
    file.Write(
        "    uint32 immediate_data_size, const gles2::%s& c) {\n" % func.name)
    args = func.GetOriginalArgs()
    for arg in args:
      arg.WriteGetCode(file)
    code = """  typedef %(func_name)s::Result Result;
  Result* result_dst = GetSharedMemoryAs<Result*>(
      c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
  if (!result_dst) {
    return error::kOutOfBounds;
  }
"""
    file.Write(code % {'func_name': func.name})
    func.WriteHandlerValidation(file)
    file.Write("  *result_dst = %s(%s);\n" %
               (func.GetGLFunctionName(), func.MakeOriginalArgString("")))
    file.Write("  return error::kNoError;\n")
    file.Write("}\n")
    file.Write("\n")
  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler: client-side blocking call that waits for
    the service to fill the result slot; skipped if impl_func info is False."""
    impl_func = func.GetInfo('impl_func')
    if impl_func == None or impl_func == True:
      error_value = func.GetInfo("error_value") or "GL_FALSE"
      file.Write("%s GLES2Implementation::%s(%s) {\n" %
                 (func.return_type, func.original_name,
                  func.MakeTypedOriginalArgString("")))
      file.Write("  GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
      self.WriteTraceEvent(func, file)
      func.WriteDestinationInitalizationValidation(file)
      self.WriteClientGLCallLog(func, file)
      file.Write("  typedef %s::Result Result;\n" % func.name)
      file.Write("  Result* result = GetResultAs<Result*>();\n")
      file.Write("  if (!result) {\n")
      file.Write("    return %s;\n" % error_value)
      file.Write("  }\n")
      file.Write("  *result = 0;\n")
      arg_string = func.MakeOriginalArgString("")
      comma = ""
      if len(arg_string) > 0:
        comma = ", "
      file.Write(
          "  helper_->%s(%s%sGetResultShmId(), GetResultShmOffset());\n" %
              (func.name, arg_string, comma))
      file.Write("  WaitForCmd();\n")
      file.Write("  %s result_value = *result;\n" % func.return_type)
      file.Write('  GPU_CLIENT_LOG("returned " << result_value);\n')
      file.Write("  CheckGLError();\n")
      file.Write("  return result_value;\n")
      file.Write("}\n")
      file.Write("\n")
  def WriteGLES2ImplementationUnitTest(self, func, file):
    """Overridden from TypeHandler: client-side unit test; skipped if the
    client_test info is False."""
    client_test = func.GetInfo('client_test')
    if client_test == None or client_test == True:
      code = """
TEST_F(GLES2ImplementationTest, %(name)s) {
  struct Cmds {
    %(name)s cmd;
  };
  typedef %(name)s::Result Result;
  Cmds expected;
  ExpectedMemoryInfo result1 =
      GetExpectedResultMemory(sizeof(%(name)s::Result));
  expected.cmd.Init(1, result1.id, result1.offset);
  EXPECT_CALL(*command_buffer(), OnFlush())
      .WillOnce(SetMemory(result1.ptr, uint32(1)))
      .RetiresOnSaturation();
  GLboolean result = gl_->%(name)s(1);
  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
  EXPECT_TRUE(result);
}
"""
      file.Write(code % {
        'name': func.name,
      })
class STRnHandler(TypeHandler):
  """Handler for GetProgramInfoLog, GetShaderInfoLog, GetShaderSource, and
  GetTranslatedShaderSourceANGLE.

  These GL calls return a variable-length string.  The command-buffer
  version transfers the string through a bucket, so the wire command is
  reduced to (first original arg, bucket_id).
  """

  def __init__(self):
    TypeHandler.__init__(self)

  def InitFunction(self, func):
    """Overridden from TypeHandler: rewrites the cmd args to
    (first original cmd arg, bucket_id)."""
    # remove all but the first cmd args.
    cmd_args = func.GetCmdArgs()
    func.ClearCmdArgs()
    func.AddCmdArg(cmd_args[0])
    # add on a bucket id.
    func.AddCmdArg(Argument('bucket_id', 'uint32'))

  def WriteGLES2Implementation(self, func, file):
    """Overridden from TypeHandler.

    Client side: ask the service to fill kResultBucketId, then copy at most
    bufsize - 1 chars plus a terminating NUL into the caller's buffer, and
    report the copied length through the optional length out-param.
    """
    code_1 = """%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
"""
    # BUG FIX: the "if (bufsize > 0)" check previously hard-coded the
    # identifier 'bufsize'; it only compiled because every STRn function
    # happens to name its second parameter 'bufsize'.  Use the
    # %(bufsize_name)s substitution like the rest of the template.
    code_2 = """  GPU_CLIENT_LOG("[" << GetLogPrefix()
      << "] gl%(func_name)s" << "("
      << %(arg0)s << ", "
      << %(arg1)s << ", "
      << static_cast<void*>(%(arg2)s) << ", "
      << static_cast<void*>(%(arg3)s) << ")");
  helper_->SetBucketSize(kResultBucketId, 0);
  helper_->%(func_name)s(%(id_name)s, kResultBucketId);
  std::string str;
  GLsizei max_size = 0;
  if (GetBucketAsString(kResultBucketId, &str)) {
    if (%(bufsize_name)s > 0) {
      max_size =
          std::min(static_cast<size_t>(%(bufsize_name)s) - 1, str.size());
      memcpy(%(dest_name)s, str.c_str(), max_size);
      %(dest_name)s[max_size] = '\\0';
      GPU_CLIENT_LOG("------\\n" << %(dest_name)s << "\\n------");
    }
  }
  if (%(length_name)s != NULL) {
    *%(length_name)s = max_size;
  }
  CheckGLError();
}
"""
    # Arg layout for all STRn functions: (id, bufsize, length, dest).
    args = func.GetOriginalArgs()
    str_args = {
      'return_type': func.return_type,
      'func_name': func.original_name,
      'args': func.MakeTypedOriginalArgString(""),
      'id_name': args[0].name,
      'bufsize_name': args[1].name,
      'length_name': args[2].name,
      'dest_name': args[3].name,
      'arg0': args[0].name,
      'arg1': args[1].name,
      'arg2': args[2].name,
      'arg3': args[3].name,
    }
    file.Write(code_1 % str_args)
    func.WriteDestinationInitalizationValidation(file)
    file.Write(code_2 % str_args)

  def WriteServiceUnitTest(self, func, file):
    """Overridden from TypeHandler.

    The valid-args test checks the string lands in the bucket (including the
    trailing NUL); the invalid-args test uses a bad client id and expects
    GL_INVALID_VALUE with no GL call.
    """
    valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
  const char* kInfo = "hello";
  const uint32 kBucketId = 123;
  SpecializedSetup<%(name)s, 0>(true);
%(expect_len_code)s
  EXPECT_CALL(*gl_, %(gl_func_name)s(%(gl_args)s))
      .WillOnce(DoAll(SetArgumentPointee<2>(strlen(kInfo)),
                      SetArrayArgument<3>(kInfo, kInfo + strlen(kInfo) + 1)));
  %(name)s cmd;
  cmd.Init(%(args)s);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  CommonDecoder::Bucket* bucket = decoder_->GetBucket(kBucketId);
  ASSERT_TRUE(bucket != NULL);
  EXPECT_EQ(strlen(kInfo) + 1, bucket->size());
  EXPECT_EQ(0, memcmp(bucket->GetData(0, bucket->size()), kInfo,
                      bucket->size()));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
"""
    args = func.GetOriginalArgs()
    id_name = args[0].GetValidGLArg(func, 0, 0)
    get_len_func = func.GetInfo('get_len_func')
    get_len_enum = func.GetInfo('get_len_enum')
    sub = {
      'id_name': id_name,
      'get_len_func': get_len_func,
      'get_len_enum': get_len_enum,
      'gl_args': '%s, strlen(kInfo) + 1, _, _' %
          args[0].GetValidGLArg(func, 0, 0),
      'args': '%s, kBucketId' % args[0].GetValidArg(func, 0, 0),
      'expect_len_code': '',
    }
    # If the length is queried via a gl call (e.g. glGetShaderiv), expect
    # that call first so the decoder can size the buffer.
    if get_len_func and get_len_func[0:2] == 'gl':
      sub['expect_len_code'] = (
        "  EXPECT_CALL(*gl_, %s(%s, %s, _))\n"
        "      .WillOnce(SetArgumentPointee<2>(strlen(kInfo) + 1));") % (
            get_len_func[2:], id_name, get_len_enum)
    self.WriteValidUnitTest(func, file, valid_test, sub)
    invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
  const uint32 kBucketId = 123;
  EXPECT_CALL(*gl_, %(gl_func_name)s(_, _, _, _))
      .Times(0);
  %(name)s cmd;
  cmd.Init(kInvalidClientId, kBucketId);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
"""
    # WriteValidUnitTest is used on purpose: this template has no
    # per-argument %(arg_index)s placeholders for WriteInvalidUnitTest.
    self.WriteValidUnitTest(func, file, invalid_test)

  def WriteServiceImplementation(self, func, file):
    """Overridden from TypeHandler: the service handler is hand-written."""
    pass
class FunctionInfo(object):
  """Holds the metadata for a single GL function.

  Every key/value pair of the info dict becomes an attribute, and a type
  handler is attached so code generation can be dispatched by 'type'.
  """
  def __init__(self, info, type_handler):
    for key in info:
      setattr(self, key, info[key])
    self.type_handler = type_handler
    # Default the type so callers can always read self.type safely.
    if 'type' not in info:
      self.type = ''
class Argument(object):
  """A class that represents a function argument.

  Maps GL types to command-buffer wire types and provides the hooks the
  generators use to emit marshalling, validation, logging and test code.
  """
  # GL type -> command buffer wire type; anything unlisted travels as uint32.
  cmd_type_map_ = {
    'GLenum': 'uint32',
    'GLint': 'int32',
    'GLintptr': 'int32',
    'GLsizei': 'int32',
    'GLsizeiptr': 'int32',
    'GLfloat': 'float',
    'GLclampf': 'float',
  }
  # Pointer types whose destination must be checked for initialization on
  # the client side before the GL call writes through them.
  need_validation_ = ['GLsizei*', 'GLboolean*', 'GLenum*', 'GLint*']
  def __init__(self, name, type):
    self.name = name
    # A trailing "Optional*" marks a pointer the caller may pass as NULL.
    self.optional = type.endswith("Optional*")
    if self.optional:
      type = type[:-9] + "*"
    self.type = type
    if type in self.cmd_type_map_:
      self.cmd_type = self.cmd_type_map_[type]
    else:
      self.cmd_type = 'uint32'
  def IsPointer(self):
    """Returns true if argument is a pointer."""
    return False
  def AddCmdArgs(self, args):
    """Adds command arguments for this argument to the given list."""
    return args.append(self)
  def AddInitArgs(self, args):
    """Adds init arguments for this argument to the given list."""
    return args.append(self)
  def GetValidArg(self, func, offset, index):
    """Gets a valid value, preferring an explicit override from the func."""
    valid_arg = func.GetValidArg(offset)
    if valid_arg is not None:
      return valid_arg
    return str(offset + 1)
  def GetValidClientSideArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return str(offset + 1)
  def GetValidClientSideCmdArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return str(offset + 1)
  def GetValidGLArg(self, func, offset, index):
    """Gets a valid GL value for this argument."""
    valid_arg = func.GetValidArg(offset)
    if valid_arg is not None:
      return valid_arg
    return str(offset + 1)
  def GetNumInvalidValues(self, func):
    """returns the number of invalid values to be tested."""
    return 0
  def GetInvalidArg(self, offset, index):
    """returns an invalid value and expected parse result by index."""
    # Base arguments have no invalid values; these markers make it obvious
    # in generated output if this default is ever used by mistake.
    return ("---ERROR0---", "---ERROR2---", None)
  def GetLogArg(self):
    """Get argument appropriate for LOG macro."""
    if self.type == 'GLboolean':
      return 'GLES2Util::GetStringBool(%s)' % self.name
    if self.type == 'GLenum':
      return 'GLES2Util::GetStringEnum(%s)' % self.name
    return self.name
  def WriteGetCode(self, file):
    """Writes the code to get an argument from a command structure."""
    file.Write("  %s %s = static_cast<%s>(c.%s);\n" %
               (self.type, self.name, self.type, self.name))
  def WriteValidationCode(self, file, func):
    """Writes the validation code for an argument."""
    pass
  def WriteClientSideValidationCode(self, file, func):
    """Writes the validation code for an argument."""
    pass
  def WriteDestinationInitalizationValidation(self, file, func):
    """Writes the client side destination initialization validation."""
    pass
  def WriteDestinationInitalizationValidatationIfNeeded(self, file, func):
    """Writes the client side destination initialization validation if needed."""
    parts = self.type.split(" ")
    # Multi-word types (e.g. "const char*") never need this validation.
    if len(parts) > 1:
      return
    if parts[0] in self.need_validation_:
      file.Write(
          "  GPU_CLIENT_VALIDATE_DESTINATION_%sINITALIZATION(%s, %s);\n" %
          ("OPTIONAL_" if self.optional else "", self.type[:-1], self.name))
  def WriteGetAddress(self, file):
    """Writes the code to get the address this argument refers to."""
    pass
  def GetImmediateVersion(self):
    """Gets the immediate version of this argument."""
    return self
  def GetBucketVersion(self):
    """Gets the bucket version of this argument."""
    return self
class BoolArgument(Argument):
  """Represents a GLboolean argument.

  The wire type is forced to GLboolean regardless of the declared type,
  and every "valid value" query simply yields the literal 'true'.
  """
  def __init__(self, name, type):
    Argument.__init__(self, name, 'GLboolean')
  def GetValidArg(self, func, offset, index):
    """Overridden from Argument: 'true' is always a valid boolean."""
    return 'true'
  def GetValidClientSideArg(self, func, offset, index):
    """Overridden from Argument: 'true' is always a valid boolean."""
    return 'true'
  def GetValidClientSideCmdArg(self, func, offset, index):
    """Overridden from Argument: 'true' is always a valid boolean."""
    return 'true'
  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument: 'true' is always a valid boolean."""
    return 'true'
class UniformLocationArgument(Argument):
  """Class for uniform location arguments (always carried as GLint)."""
  def __init__(self, name):
    Argument.__init__(self, name, "GLint")
  def WriteGetCode(self, file):
    """Writes the code to get an argument from a command structure."""
    code = """  %s %s = static_cast<%s>(c.%s);
"""
    file.Write(code % (self.type, self.name, self.type, self.name))
  def GetValidArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    # Unlike the base class this returns a bare int, not a string of one
    # derived from func overrides.
    return "%d" % (offset + 1)
class DataSizeArgument(Argument):
  """Represents the implicit data_size argument.

  Bucket commands carry their payload size in the bucket itself, so the
  bucket version of this argument simply disappears.
  """
  def __init__(self, name):
    Argument.__init__(self, name, "uint32")
  def GetBucketVersion(self):
    """Overridden from Argument: bucket commands do not need a data_size."""
    return None
class SizeArgument(Argument):
  """Class for GLsizei and GLsizeiptr arguments.

  Generated service and client code reject negative sizes with
  GL_INVALID_VALUE.
  """
  def __init__(self, name, type):
    Argument.__init__(self, name, type)
  def GetNumInvalidValues(self, func):
    """Overridden from Argument."""
    # NOTE(review): immediate commands skip the invalid-size test —
    # presumably the size is implied by the attached payload; confirm.
    if func.is_immediate:
      return 0
    return 1
  def GetInvalidArg(self, offset, index):
    """Overridden from Argument: -1 parses OK but raises GL_INVALID_VALUE."""
    return ("-1", "kNoError", "GL_INVALID_VALUE")
  def WriteValidationCode(self, file, func):
    """Overridden from Argument: service-side negative-size check."""
    file.Write("  if (%s < 0) {\n" % self.name)
    file.Write("    SetGLError(GL_INVALID_VALUE, \"gl%s\", \"%s < 0\");\n" %
               (func.original_name, self.name))
    file.Write("    return error::kNoError;\n")
    file.Write("  }\n")
  def WriteClientSideValidationCode(self, file, func):
    """Overridden from Argument: client-side negative-size check."""
    file.Write("  if (%s < 0) {\n" % self.name)
    file.Write("    SetGLError(GL_INVALID_VALUE, \"gl%s\", \"%s < 0\");\n" %
               (func.original_name, self.name))
    file.Write("    return;\n")
    file.Write("  }\n")
class SizeNotNegativeArgument(SizeArgument):
  """Represents a GLsizeiNotNegative argument: it is NEVER allowed to be
  negative.

  No runtime validation branch is emitted; a negative value is treated as
  an out-of-bounds parse error rather than a GL error.
  """
  def __init__(self, name, type, gl_type):
    SizeArgument.__init__(self, name, gl_type)
  def GetInvalidArg(self, offset, index):
    """Overridden from SizeArgument: -1 fails parsing as out-of-bounds."""
    return ("-1", "kOutOfBounds", "GL_NO_ERROR")
  def WriteValidationCode(self, file, func):
    """Overridden from SizeArgument: no service-side check is needed."""
    pass
class EnumBaseArgument(Argument):
  """Base class for EnumArgument, IntArgument and ValidatedBoolArgument.

  The value must come from a named enum list in _ENUM_LISTS; generated
  service code validates it and raises gl_error on failure.
  """
  def __init__(self, name, gl_type, type, gl_error):
    Argument.__init__(self, name, gl_type)
    self.local_type = type
    self.gl_error = gl_error
    # The enum list name is the declared type with the GL prefix removed,
    # e.g. "GLenumTextureTarget" -> "TextureTarget".
    name = type[len(gl_type):]
    self.type_name = name
    self.enum_info = _ENUM_LISTS[name]
  def WriteValidationCode(self, file, func):
    """Overridden from Argument: emit the validator membership check."""
    file.Write("  if (!validators_->%s.IsValid(%s)) {\n" %
               (ToUnderscore(self.type_name), self.name))
    if self.gl_error == "GL_INVALID_ENUM":
      file.Write(
          "    SetGLErrorInvalidEnum(\"gl%s\", %s, \"%s\");\n" %
          (func.original_name, self.name, self.name))
    else:
      file.Write(
          "    SetGLError(%s, \"gl%s\", \"%s %s\");\n" %
          (self.gl_error, func.original_name, self.name, self.gl_error))
    file.Write("    return error::kNoError;\n")
    file.Write("  }\n")
  def GetValidArg(self, func, offset, index):
    """Gets a valid value, preferring an explicit override from the func."""
    valid_arg = func.GetValidArg(offset)
    if valid_arg is not None:
      return valid_arg
    if 'valid' in self.enum_info:
      valid = self.enum_info['valid']
      num_valid = len(valid)
      # Clamp to the last known-valid value when index runs off the list.
      if index >= num_valid:
        index = num_valid - 1
      return valid[index]
    return str(offset + 1)
  def GetValidClientSideArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return self.GetValidArg(func, offset, index)
  def GetValidClientSideCmdArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return self.GetValidArg(func, offset, index)
  def GetValidGLArg(self, func, offset, index):
    """Gets a valid value for this argument."""
    return self.GetValidArg(func, offset, index)
  def GetNumInvalidValues(self, func):
    """Returns the number of invalid values to be tested."""
    if 'invalid' in self.enum_info:
      invalid = self.enum_info['invalid']
      return len(invalid)
    return 0
  def GetInvalidArg(self, offset, index):
    """Returns an invalid value by index."""
    if 'invalid' in self.enum_info:
      invalid = self.enum_info['invalid']
      num_invalid = len(invalid)
      if index >= num_invalid:
        index = num_invalid - 1
      return (invalid[index], "kNoError", self.gl_error)
    return ("---ERROR1---", "kNoError", self.gl_error)
class EnumArgument(EnumBaseArgument):
  """Represents a GLenum argument constrained to a named enum list."""
  def __init__(self, name, type):
    EnumBaseArgument.__init__(self, name, "GLenum", type, "GL_INVALID_ENUM")
  def GetLogArg(self):
    """Overridden from Argument: log the human-readable enum name."""
    return "GLES2Util::GetString%s(%s)" % (self.type_name, self.name)
class IntArgument(EnumBaseArgument):
  """A class for a GLint argument that can only accept specific values.
  For example glTexImage2D takes a GLint for its internalformat
  argument instead of a GLenum.
  """
  def __init__(self, name, type):
    # Out-of-range values are a GL_INVALID_VALUE error, not INVALID_ENUM,
    # because the declared GL type is an integer.
    EnumBaseArgument.__init__(self, name, "GLint", type, "GL_INVALID_VALUE")
class ValidatedBoolArgument(EnumBaseArgument):
  """A class for a GLboolean argument that can only accept specific values.
  For example glUniformMatrix takes a GLboolean for its transpose but it
  must be false.
  """
  def __init__(self, name, type):
    EnumBaseArgument.__init__(self, name, "GLboolean", type, "GL_INVALID_VALUE")
  def GetLogArg(self):
    """Overridden from Argument: log as a readable true/false string."""
    return 'GLES2Util::GetStringBool(%s)' % self.name
class ImmediatePointerArgument(Argument):
  """A class that represents an immediate argument to a function.
  An immediate argument is one where the data follows the command.
  """
  def __init__(self, name, type):
    Argument.__init__(self, name, type)
  def AddCmdArgs(self, args):
    """Overridden from Argument."""
    # The payload rides after the command itself, so no cmd field is added.
    pass
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(
      "  %s %s = GetImmediateDataAs<%s>(\n" %
      (self.type, self.name, self.type))
    file.Write("      c, data_size, immediate_data_size);\n")
  def WriteValidationCode(self, file, func):
    """Overridden from Argument."""
    # GetImmediateDataAs returns NULL when the payload is too small.
    file.Write("  if (%s == NULL) {\n" % self.name)
    file.Write("    return error::kOutOfBounds;\n")
    file.Write("  }\n")
  def GetImmediateVersion(self):
    """Overridden from Argument: already immediate."""
    return None
  def WriteDestinationInitalizationValidation(self, file, func):
    """Overridden from Argument."""
    self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
  def GetLogArg(self):
    """Overridden from Argument."""
    return "static_cast<const void*>(%s)" % self.name
class BucketPointerArgument(Argument):
  """A class that represents a bucket argument to a function.
  The data is transferred via a shared-memory bucket instead of a pointer.
  """
  def __init__(self, name, type):
    Argument.__init__(self, name, type)
  def AddCmdArgs(self, args):
    """Overridden from Argument."""
    # The bucket id is added separately; this argument adds no cmd field.
    pass
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(
      "  %s %s = bucket->GetData(0, data_size);\n" %
      (self.type, self.name))
  def WriteValidationCode(self, file, func):
    """Overridden from Argument: the bucket lookup validates itself."""
    pass
  def GetImmediateVersion(self):
    """Overridden from Argument: no immediate form exists."""
    return None
  def WriteDestinationInitalizationValidation(self, file, func):
    """Overridden from Argument."""
    self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
  def GetLogArg(self):
    """Overridden from Argument."""
    return "static_cast<const void*>(%s)" % self.name
class PointerArgument(Argument):
  """A class that represents a pointer argument to a function.

  On the wire the pointer becomes a (shm_id, shm_offset) pair into shared
  memory; buckets or immediate data are used for the alternate versions.
  """
  def __init__(self, name, type):
    Argument.__init__(self, name, type)
  def IsPointer(self):
    """Returns true if argument is a pointer."""
    return True
  def GetValidArg(self, func, offset, index):
    """Overridden from Argument."""
    return "shared_memory_id_, shared_memory_offset_"
  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument."""
    return "reinterpret_cast<%s>(shared_memory_address_)" % self.type
  def GetNumInvalidValues(self, func):
    """Overridden from Argument."""
    # Two failure modes: bad shm id, and bad offset.
    return 2
  def GetInvalidArg(self, offset, index):
    """Overridden from Argument."""
    if index == 0:
      return ("kInvalidSharedMemoryId, 0", "kOutOfBounds", None)
    else:
      return ("shared_memory_id_, kInvalidSharedMemoryOffset",
              "kOutOfBounds", None)
  def GetLogArg(self):
    """Overridden from Argument."""
    return "static_cast<const void*>(%s)" % self.name
  def AddCmdArgs(self, args):
    """Overridden from Argument."""
    # The pointer is encoded as two uint32 fields in the command struct.
    args.append(Argument("%s_shm_id" % self.name, 'uint32'))
    args.append(Argument("%s_shm_offset" % self.name, 'uint32'))
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write(
      "  %s %s = GetSharedMemoryAs<%s>(\n" %
      (self.type, self.name, self.type))
    file.Write(
      "      c.%s_shm_id, c.%s_shm_offset, data_size);\n" %
      (self.name, self.name))
  def WriteGetAddress(self, file):
    """Overridden from Argument."""
    file.Write(
      "  %s %s = GetSharedMemoryAs<%s>(\n" %
      (self.type, self.name, self.type))
    file.Write(
      "      %s_shm_id, %s_shm_offset, %s_size);\n" %
      (self.name, self.name, self.name))
  def WriteValidationCode(self, file, func):
    """Overridden from Argument."""
    # GetSharedMemoryAs returns NULL for invalid id/offset/size.
    file.Write("  if (%s == NULL) {\n" % self.name)
    file.Write("    return error::kOutOfBounds;\n")
    file.Write("  }\n")
  def GetImmediateVersion(self):
    """Overridden from Argument."""
    return ImmediatePointerArgument(self.name, self.type)
  def GetBucketVersion(self):
    """Overridden from Argument."""
    # Strings get a dedicated bucket argument that decodes to a C string.
    if self.type == "const char*":
      return InputStringBucketArgument(self.name, self.type)
    return BucketPointerArgument(self.name, self.type)
  def WriteDestinationInitalizationValidation(self, file, func):
    """Overridden from Argument."""
    self.WriteDestinationInitalizationValidatationIfNeeded(file, func)
class InputStringBucketArgument(Argument):
  """A string input argument where the string is passed in a bucket.

  Note the wire-level argument is named "<name>_bucket_id" and carries the
  bucket id as a uint32; the decoder turns it back into a C string.
  """
  def __init__(self, name, type):
    Argument.__init__(self, name + "_bucket_id", "uint32")
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    code = """
  Bucket* %(name)s_bucket = GetBucket(c.%(name)s);
  if (!%(name)s_bucket) {
    return error::kInvalidArguments;
  }
  std::string %(name)s_str;
  if (!%(name)s_bucket->GetAsString(&%(name)s_str)) {
    return error::kInvalidArguments;
  }
  const char* %(name)s = %(name)s_str.c_str();
"""
    file.Write(code % {
        'name': self.name,
      })
  def GetValidArg(self, func, offset, index):
    """Overridden from Argument."""
    return "kNameBucketId"
  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument: the GL-level value is a don't-care."""
    return "_"
class NonImmediatePointerArgument(PointerArgument):
  """A pointer argument that stays a pointer even in an immediate cmd."""
  def __init__(self, name, type):
    PointerArgument.__init__(self, name, type)
  def IsPointer(self):
    """Overridden from PointerArgument: not treated as a pointer."""
    return False
  def GetImmediateVersion(self):
    """Overridden from PointerArgument: usable as-is in immediate cmds."""
    return self
class ResourceIdArgument(Argument):
  """A class that represents a resource id argument to a function."""
  def __init__(self, name, type):
    # Declared types look like "GLidTexture": strip the "GLid" prefix (4
    # chars) to get the resource type and replace it with plain GLuint.
    match = re.match("(GLid\w+)", type)
    self.resource_type = match.group(1)[4:]
    type = type.replace(match.group(1), "GLuint")
    Argument.__init__(self, name, type)
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write("  %s %s = c.%s;\n" % (self.type, self.name, self.name))
  def GetValidArg(self, func, offset, index):
    """Overridden from Argument: use the test fixture's client-side id."""
    return "client_%s_id_" % self.resource_type.lower()
  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument: use the test fixture's service-side id."""
    return "kService%sId" % self.resource_type
class ResourceIdBindArgument(Argument):
  """Represents a resource id argument to a bind function."""
  def __init__(self, name, type):
    # Declared types look like "GLidBindTexture": strip the "GLidBind"
    # prefix (8 chars) to get the resource type; wire type is GLuint.
    match = re.match("(GLidBind\w+)", type)
    self.resource_type = match.group(1)[8:]
    type = type.replace(match.group(1), "GLuint")
    Argument.__init__(self, name, type)
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    code = """  %(type)s %(name)s = c.%(name)s;
"""
    file.Write(code % {'type': self.type, 'name': self.name})
  def GetValidArg(self, func, offset, index):
    """Overridden from Argument: use the test fixture's client-side id."""
    return "client_%s_id_" % self.resource_type.lower()
  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument: use the test fixture's service-side id."""
    return "kService%sId" % self.resource_type
class ResourceIdZeroArgument(Argument):
  """Represents a resource id argument to a function that can be zero."""
  def __init__(self, name, type):
    # Declared types look like "GLidZeroTexture": strip the "GLidZero"
    # prefix (8 chars) to get the resource type; wire type is GLuint.
    match = re.match("(GLidZero\w+)", type)
    self.resource_type = match.group(1)[8:]
    type = type.replace(match.group(1), "GLuint")
    Argument.__init__(self, name, type)
  def WriteGetCode(self, file):
    """Overridden from Argument."""
    file.Write("  %s %s = c.%s;\n" % (self.type, self.name, self.name))
  def GetValidArg(self, func, offset, index):
    """Overridden from Argument: use the test fixture's client-side id."""
    return "client_%s_id_" % self.resource_type.lower()
  def GetValidGLArg(self, func, offset, index):
    """Overridden from Argument: use the test fixture's service-side id."""
    return "kService%sId" % self.resource_type
  def GetNumInvalidValues(self, func):
    """Returns the number of invalid values to be tested."""
    return 1
  def GetInvalidArg(self, offset, index):
    """Returns an invalid value by index."""
    return ("kInvalidClientId", "kNoError", "GL_INVALID_VALUE")
class Function(object):
  """A class that represents a GL function to generate code for.

  Holds the original GL signature plus the derived command-buffer argument
  lists, and forwards most code generation to the function's type handler.
  """
  def __init__(self, original_name, name, info, return_type, original_args,
               args_for_cmds, cmd_args, init_args, num_pointer_args):
    self.name = name
    self.original_name = original_name
    self.info = info
    self.type_handler = info.type_handler
    self.return_type = return_type
    self.original_args = original_args
    self.num_pointer_args = num_pointer_args
    # Only void functions without pointer args can be fully auto-generated.
    self.can_auto_generate = num_pointer_args == 0 and return_type == "void"
    self.cmd_args = cmd_args
    self.init_args = init_args
    self.InitFunction()
    self.args_for_cmds = args_for_cmds
    self.is_immediate = False
  def IsType(self, type_name):
    """Returns true if function is a certain type."""
    return self.info.type == type_name
  def InitFunction(self):
    """Calls the init function for the type handler."""
    self.type_handler.InitFunction(self)
  def GetInfo(self, name):
    """Returns a value from the function info for this function."""
    if hasattr(self.info, name):
      return getattr(self.info, name)
    return None
  def GetValidArg(self, index):
    """Gets a valid arg from the function info if one exists."""
    valid_args = self.GetInfo('valid_args')
    # valid_args is keyed by the stringified argument index.
    if valid_args and str(index) in valid_args:
      return valid_args[str(index)]
    return None
  def AddInfo(self, name, value):
    """Adds an info."""
    setattr(self.info, name, value)
  def IsCoreGLFunction(self):
    """True for functions that are neither extensions nor Pepper-only."""
    return (not self.GetInfo('extension') and
            not self.GetInfo('pepper_interface'))
  def InPepperInterface(self, interface):
    """True if this function belongs to the given Pepper interface."""
    ext = self.GetInfo('pepper_interface')
    if not interface.GetName():
      return self.IsCoreGLFunction()
    return ext == interface.GetName()
  def InAnyPepperExtension(self):
    """True if this function appears in some Pepper interface."""
    return self.IsCoreGLFunction() or self.GetInfo('pepper_interface')
  def GetGLFunctionName(self):
    """Gets the function to call to execute GL for this command."""
    if self.GetInfo('decoder_func'):
      return self.GetInfo('decoder_func')
    return "gl%s" % self.original_name
  def GetGLTestFunctionName(self):
    """Gets the GL function name to expect in mock-based unit tests."""
    gl_func_name = self.GetInfo('gl_test_func')
    if gl_func_name is None:
      gl_func_name = self.GetGLFunctionName()
    if gl_func_name.startswith("gl"):
      gl_func_name = gl_func_name[2:]
    else:
      gl_func_name = self.original_name
    return gl_func_name
  def AddCmdArg(self, arg):
    """Adds a cmd argument to this function."""
    self.cmd_args.append(arg)
  def GetCmdArgs(self):
    """Gets the command args for this function."""
    return self.cmd_args
  def ClearCmdArgs(self):
    """Clears the command args for this function."""
    self.cmd_args = []
  def GetInitArgs(self):
    """Gets the init args for this function."""
    return self.init_args
  def GetOriginalArgs(self):
    """Gets the original arguments to this function."""
    return self.original_args
  def GetLastOriginalArg(self):
    """Gets the last original argument to this function."""
    return self.original_args[-1]
  def __GetArgList(self, arg_string, add_comma):
    """Prepends a comma if arg_string is not empty and add_comma is true."""
    comma = ""
    if add_comma and arg_string:
      comma = ", "
    return "%s%s" % (comma, arg_string)
  def MakeTypedOriginalArgString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they are in GL."""
    args = self.GetOriginalArgs()
    arg_string = ", ".join(
        ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)
  def MakeOriginalArgString(self, prefix, add_comma = False, separator = ", "):
    """Gets the list of arguments as they are in GL."""
    args = self.GetOriginalArgs()
    arg_string = separator.join(
        ["%s%s" % (prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)
  def MakeTypedCmdArgString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they need to be for command buffers."""
    args = self.GetCmdArgs()
    arg_string = ", ".join(
        ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)
  def MakeCmdArgString(self, prefix, add_comma = False):
    """Gets the list of arguments as they need to be for command buffers."""
    args = self.GetCmdArgs()
    arg_string = ", ".join(
        ["%s%s" % (prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)
  def MakeTypedInitString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they need to be for cmd Init/Set."""
    args = self.GetInitArgs()
    arg_string = ", ".join(
        ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)
  def MakeInitString(self, prefix, add_comma = False):
    """Gets the list of arguments as they need to be for cmd Init/Set."""
    args = self.GetInitArgs()
    arg_string = ", ".join(
        ["%s%s" % (prefix, arg.name) for arg in args])
    return self.__GetArgList(arg_string, add_comma)
  def MakeLogArgString(self):
    """Makes a string of the arguments for the LOG macros"""
    args = self.GetOriginalArgs()
    return ' << ", " << '.join([arg.GetLogArg() for arg in args])
  def WriteCommandDescription(self, file):
    """Writes a description of the command."""
    file.Write("//! Command that corresponds to gl%s.\n" % self.original_name)
  def WriteHandlerValidation(self, file):
    """Writes validation code for the function."""
    for arg in self.GetOriginalArgs():
      arg.WriteValidationCode(file, self)
    self.WriteValidationCode(file)
  def WriteHandlerImplementation(self, file):
    """Writes the handler implementation for this command."""
    self.type_handler.WriteHandlerImplementation(self, file)
  def WriteValidationCode(self, file):
    """Writes the validation code for a command."""
    pass
  def WriteCmdArgFlag(self, file):
    """Writes the cmd kArgFlags constant."""
    file.Write("  static const cmd::ArgFlags kArgFlags = cmd::kFixed;\n")
  def WriteCmdComputeSize(self, file):
    """Writes the ComputeSize function for the command."""
    file.Write("  static uint32 ComputeSize() {\n")
    file.Write(
        "    return static_cast<uint32>(sizeof(ValueType));  // NOLINT\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteCmdSetHeader(self, file):
    """Writes the cmd's SetHeader function."""
    file.Write("  void SetHeader() {\n")
    file.Write("    header.SetCmd<ValueType>();\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteCmdInit(self, file):
    """Writes the cmd's Init function."""
    file.Write("  void Init(%s) {\n" % self.MakeTypedCmdArgString("_"))
    file.Write("    SetHeader();\n")
    args = self.GetCmdArgs()
    for arg in args:
      file.Write("    %s = _%s;\n" % (arg.name, arg.name))
    file.Write("  }\n")
    file.Write("\n")
  def WriteCmdSet(self, file):
    """Writes the cmd's Set function."""
    copy_args = self.MakeCmdArgString("_", False)
    file.Write("  void* Set(void* cmd%s) {\n" %
               self.MakeTypedCmdArgString("_", True))
    file.Write("    static_cast<ValueType*>(cmd)->Init(%s);\n" % copy_args)
    file.Write("    return NextCmdAddress<ValueType>(cmd);\n")
    file.Write("  }\n")
    file.Write("\n")
  def WriteStruct(self, file):
    self.type_handler.WriteStruct(self, file)
  def WriteDocs(self, file):
    self.type_handler.WriteDocs(self, file)
  def WriteCmdHelper(self, file):
    """Writes the cmd's helper."""
    self.type_handler.WriteCmdHelper(self, file)
  def WriteServiceImplementation(self, file):
    """Writes the service implementation for a command."""
    self.type_handler.WriteServiceImplementation(self, file)
  def WriteServiceUnitTest(self, file):
    """Writes the service unit test for a command."""
    self.type_handler.WriteServiceUnitTest(self, file)
  def WriteGLES2CLibImplementation(self, file):
    """Writes the GLES2 C Lib Implemention."""
    self.type_handler.WriteGLES2CLibImplementation(self, file)
  def WriteGLES2InterfaceHeader(self, file):
    """Writes the GLES2 Interface declaration."""
    self.type_handler.WriteGLES2InterfaceHeader(self, file)
  def WriteGLES2InterfaceStub(self, file):
    """Writes the GLES2 Interface Stub declaration."""
    self.type_handler.WriteGLES2InterfaceStub(self, file)
  def WriteGLES2InterfaceStubImpl(self, file):
    """Writes the GLES2 Interface Stub definition."""
    self.type_handler.WriteGLES2InterfaceStubImpl(self, file)
  def WriteGLES2ImplementationHeader(self, file):
    """Writes the GLES2 Implemention declaration."""
    self.type_handler.WriteGLES2ImplementationHeader(self, file)
  def WriteGLES2Implementation(self, file):
    """Writes the GLES2 Implemention definition."""
    self.type_handler.WriteGLES2Implementation(self, file)
  def WriteGLES2Header(self, file):
    """Writes the GLES2 header entry."""
    self.type_handler.WriteGLES2Header(self, file)
  def WriteGLES2ImplementationUnitTest(self, file):
    """Writes the GLES2 Implemention unit test."""
    self.type_handler.WriteGLES2ImplementationUnitTest(self, file)
  def WriteDestinationInitalizationValidation(self, file):
    """Writes the client side destination initialization validation."""
    self.type_handler.WriteDestinationInitalizationValidation(self, file)
  def WriteFormatTest(self, file):
    """Writes the cmd's format test."""
    self.type_handler.WriteFormatTest(self, file)
class PepperInterface(object):
  """Describes a Pepper (PPAPI) OpenGLES2 interface binding."""
  def __init__(self, info):
    self.name = info["name"]
    self.dev = info["dev"]
  def GetName(self):
    """Returns the short interface name ('' for the core interface)."""
    return self.name
  def GetInterfaceName(self):
    """Returns the C macro name, e.g. PPB_OPENGLES2_FOO_DEV_INTERFACE."""
    name_part = "_" + self.name.upper() if self.name else ""
    dev_part = "_DEV" if self.dev else ""
    return "PPB_OPENGLES2%s%s_INTERFACE" % (name_part, dev_part)
  def GetInterfaceString(self):
    """Returns the display string, e.g. PPB_OpenGLES2Foo(Dev)."""
    return "PPB_OpenGLES2%s%s" % (self.name, "(Dev)" if self.dev else "")
  def GetStructName(self):
    """Returns the C struct name, e.g. PPB_OpenGLES2Foo_Dev."""
    return "PPB_OpenGLES2%s%s" % (self.name, "_Dev" if self.dev else "")
class ImmediateFunction(Function):
  """A class that represents an immediate function command.

  Built from a regular Function by swapping each argument for its
  immediate version; arguments whose immediate version is None (the data
  now follows the command) are dropped from the arg lists.
  """
  def __init__(self, func):
    new_args = []
    for arg in func.GetOriginalArgs():
      new_arg = arg.GetImmediateVersion()
      if new_arg:
        new_args.append(new_arg)
    cmd_args = []
    new_args_for_cmds = []
    for arg in func.args_for_cmds:
      new_arg = arg.GetImmediateVersion()
      if new_arg:
        new_args_for_cmds.append(new_arg)
        new_arg.AddCmdArgs(cmd_args)
    new_init_args = []
    for arg in new_args_for_cmds:
      arg.AddInitArgs(new_init_args)
    Function.__init__(
      self,
      func.original_name,
      "%sImmediate" % func.name,
      func.info,
      func.return_type,
      new_args,
      new_args_for_cmds,
      cmd_args,
      new_init_args,
      0)
    self.is_immediate = True
  def WriteCommandDescription(self, file):
    """Overridden from Function"""
    file.Write("//! Immediate version of command that corresponds to gl%s.\n" %
        self.original_name)
  def WriteServiceImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateServiceImplementation(self, file)
  def WriteHandlerImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateHandlerImplementation(self, file)
  def WriteServiceUnitTest(self, file):
    """Writes the service unit test for a command."""
    self.type_handler.WriteImmediateServiceUnitTest(self, file)
  def WriteValidationCode(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateValidationCode(self, file)
  def WriteCmdArgFlag(self, file):
    """Overridden from Function"""
    # Immediate commands carry variable-length trailing data.
    file.Write("  static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;\n")
  def WriteCmdComputeSize(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdComputeSize(self, file)
  def WriteCmdSetHeader(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdSetHeader(self, file)
  def WriteCmdInit(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdInit(self, file)
  def WriteCmdSet(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdSet(self, file)
  def WriteCmdHelper(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateCmdHelper(self, file)
  def WriteFormatTest(self, file):
    """Overridden from Function"""
    self.type_handler.WriteImmediateFormatTest(self, file)
class BucketFunction(Function):
  """A class that represents a bucket version of a function command.

  Built from a regular Function by swapping each argument for its bucket
  version; arguments whose bucket version is None (e.g. data_size) are
  dropped from the arg lists.
  """
  def __init__(self, func):
    new_args = []
    for arg in func.GetOriginalArgs():
      new_arg = arg.GetBucketVersion()
      if new_arg:
        new_args.append(new_arg)
    cmd_args = []
    new_args_for_cmds = []
    for arg in func.args_for_cmds:
      new_arg = arg.GetBucketVersion()
      if new_arg:
        new_args_for_cmds.append(new_arg)
        new_arg.AddCmdArgs(cmd_args)
    new_init_args = []
    for arg in new_args_for_cmds:
      arg.AddInitArgs(new_init_args)
    Function.__init__(
      self,
      func.original_name,
      "%sBucket" % func.name,
      func.info,
      func.return_type,
      new_args,
      new_args_for_cmds,
      cmd_args,
      new_init_args,
      0)
#  def InitFunction(self):
#    """Overridden from Function"""
#    pass
  def WriteCommandDescription(self, file):
    """Overridden from Function"""
    file.Write("//! Bucket version of command that corresponds to gl%s.\n" %
        self.original_name)
  def WriteServiceImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteBucketServiceImplementation(self, file)
  def WriteHandlerImplementation(self, file):
    """Overridden from Function"""
    self.type_handler.WriteBucketHandlerImplementation(self, file)
  def WriteServiceUnitTest(self, file):
    """Writes the service unit test for a command."""
    self.type_handler.WriteBucketServiceUnitTest(self, file)
def CreateArg(arg_string):
  """Creates an Argument of the appropriate subclass.

  arg_string is a C-style declaration such as "GLenumTextureTarget target";
  the (possibly annotated) type determines which Argument subclass is
  used.  Returns None for a bare "void" parameter list.  The order of the
  checks below matters: more specific prefixes must be tested first.
  """
  arg_parts = arg_string.split()
  if len(arg_parts) == 1 and arg_parts[0] == 'void':
    return None
  # Is this a pointer argument?
  elif arg_string.find('*') >= 0:
    if arg_parts[0] == 'NonImmediate':
      return NonImmediatePointerArgument(
          arg_parts[-1],
          " ".join(arg_parts[1:-1]))
    else:
      return PointerArgument(
          arg_parts[-1],
          " ".join(arg_parts[0:-1]))
  # Is this a resource argument? Must come after pointer check.
  elif arg_parts[0].startswith('GLidBind'):
    return ResourceIdBindArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif arg_parts[0].startswith('GLidZero'):
    return ResourceIdZeroArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif arg_parts[0].startswith('GLid'):
    return ResourceIdArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif arg_parts[0].startswith('GLenum') and len(arg_parts[0]) > 6:
    return EnumArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif arg_parts[0].startswith('GLboolean') and len(arg_parts[0]) > 9:
    return ValidatedBoolArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif arg_parts[0].startswith('GLboolean'):
    return BoolArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif arg_parts[0].startswith('GLintUniformLocation'):
    return UniformLocationArgument(arg_parts[-1])
  elif (arg_parts[0].startswith('GLint') and len(arg_parts[0]) > 5 and
        not arg_parts[0].startswith('GLintptr')):
    return IntArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  elif (arg_parts[0].startswith('GLsizeiNotNegative') or
        arg_parts[0].startswith('GLintptrNotNegative')):
    # [0:-11] strips the 'NotNegative' suffix to recover the real GL type.
    return SizeNotNegativeArgument(arg_parts[-1],
                                   " ".join(arg_parts[0:-1]),
                                   arg_parts[0][0:-11])
  elif arg_parts[0].startswith('GLsize'):
    return SizeArgument(arg_parts[-1], " ".join(arg_parts[0:-1]))
  else:
    return Argument(arg_parts[-1], " ".join(arg_parts[0:-1]))
class GLGenerator(object):
  """A class to generate GL command buffers."""

  # Matches prototypes of the form
  # 'GL_APICALL <return type> GL_APIENTRY gl<Name> (<args>);'.
  _function_re = re.compile(r'GL_APICALL(.*?)GL_APIENTRY (.*?) \((.*?)\);')

  def __init__(self, verbose):
    """Args:
      verbose: if True, Log() messages are printed to stdout.
    """
    self.original_functions = []  # every parsed GL function
    self.functions = []           # functions that get generated commands
    self.verbose = verbose
    self.errors = 0               # bumped by Error(); checked in main()
    self._function_info = {}
    self._empty_type_handler = TypeHandler()
    self._empty_function_info = FunctionInfo({}, self._empty_type_handler)
    self.pepper_interfaces = []
    self.interface_info = {}
    # Maps the 'type' field of _FUNCTION_INFO entries to handler objects
    # that know how to generate code for that command flavor.
    self._type_handlers = {
      'Bind': BindHandler(),
      'Create': CreateHandler(),
      'Custom': CustomHandler(),
      'Data': DataHandler(),
      'Delete': DeleteHandler(),
      'DELn': DELnHandler(),
      'GENn': GENnHandler(),
      'GETn': GETnHandler(),
      'GLchar': GLcharHandler(),
      'GLcharN': GLcharNHandler(),
      'HandWritten': HandWrittenHandler(),
      'Is': IsHandler(),
      'Manual': ManualHandler(),
      'PUT': PUTHandler(),
      'PUTn': PUTnHandler(),
      'PUTXn': PUTXnHandler(),
      'StateSet': StateSetHandler(),
      'StateSetRGBAlpha': StateSetRGBAlphaHandler(),
      'StateSetFrontBack': StateSetFrontBackHandler(),
      'StateSetFrontBackSeparate': StateSetFrontBackSeparateHandler(),
      'STRn': STRnHandler(),
      'Todo': TodoHandler(),
    }

    for func_name in _FUNCTION_INFO:
      info = _FUNCTION_INFO[func_name]
      # NOTE(review): 'type' shadows the Python builtin; left unchanged here.
      type = ''
      if 'type' in info:
        type = info['type']
      self._function_info[func_name] = FunctionInfo(info,
                                                    self.GetTypeHandler(type))
    for interface in _PEPPER_INTERFACES:
      interface = PepperInterface(interface)
      self.pepper_interfaces.append(interface)
      self.interface_info[interface.GetName()] = interface
  def AddFunction(self, func):
    """Adds a function to the list of functions that get generated commands."""
    self.functions.append(func)

  def GetTypeHandler(self, name):
    """Gets the type handler registered under |name|.

    An empty name yields the default (empty) handler; an unknown non-empty
    name raises KeyError so typos in _FUNCTION_INFO fail loudly.
    """
    if len(name):
      if name in self._type_handlers:
        return self._type_handlers[name]
      else:
        raise KeyError("no such type handler: %s" % name)
    return self._empty_type_handler
def GetFunctionInfo(self, name):
"""Gets a type info for the given function name."""
if name in self._function_info:
return self._function_info[name]
return self._empty_function_info
  def Log(self, msg):
    """Prints something if verbose is true."""
    if self.verbose:
      print msg

  def Error(self, msg):
    """Prints an error and bumps the error count (main() returns 1 if > 0)."""
    print "Error: %s" % msg
    self.errors += 1
  def WriteLicense(self, file):
    """Writes the license header into |file|."""
    file.Write(_LICENSE)

  def WriteNamespaceOpen(self, file):
    """Writes the code that opens the gpu::gles2 namespace."""
    file.Write("namespace gpu {\n")
    file.Write("namespace gles2 {\n")
    file.Write("\n")

  def WriteNamespaceClose(self, file):
    """Writes the code to close the namespace."""
    file.Write("} // namespace gles2\n")
    file.Write("} // namespace gpu\n")
    file.Write("\n")
def ParseArgs(self, arg_string):
"""Parses a function arg string."""
args = []
num_pointer_args = 0
parts = arg_string.split(',')
is_gl_enum = False
for arg_string in parts:
if arg_string.startswith('GLenum '):
is_gl_enum = True
arg = CreateArg(arg_string)
if arg:
args.append(arg)
if arg.IsPointer():
num_pointer_args += 1
return (args, num_pointer_args, is_gl_enum)
  def ParseGLH(self, filename):
    """Parses the cmd_buffer_functions.txt file and extracts the functions.

    NOTE(review): the |filename| argument is ignored; the path below is
    hard-coded — confirm against callers before relying on the parameter.
    Populates self.original_functions / self.functions via AddFunction and
    the per-type immediate/bucket variants.
    """
    f = open("gpu/command_buffer/cmd_buffer_functions.txt", "r")
    functions = f.read()
    f.close()
    for line in functions.splitlines():
      match = self._function_re.match(line)
      if match:
        # Group 2 is 'gl<Name>'; strip the 'gl' prefix.
        func_name = match.group(2)[2:]
        func_info = self.GetFunctionInfo(func_name)
        if func_info.type != 'Noop':
          return_type = match.group(1).strip()
          arg_string = match.group(3)
          (args, num_pointer_args, is_gl_enum) = self.ParseArgs(arg_string)
          # comment in to find out which functions use bare enums.
          # if is_gl_enum:
          #   self.Log("%s uses bare GLenum" % func_name)
          args_for_cmds = args
          # A 'cmd_args' override replaces the wire-format argument list.
          if hasattr(func_info, 'cmd_args'):
            (args_for_cmds, num_pointer_args, is_gl_enum) = (
                self.ParseArgs(getattr(func_info, 'cmd_args')))
          cmd_args = []
          for arg in args_for_cmds:
            arg.AddCmdArgs(cmd_args)
          init_args = []
          for arg in args_for_cmds:
            arg.AddInitArgs(init_args)
          # Non-void returns become a trailing 'result' argument.
          return_arg = CreateArg(return_type + " result")
          if return_arg:
            init_args.append(return_arg)
          f = Function(func_name, func_name, func_info, return_type, args,
                       args_for_cmds, cmd_args, init_args, num_pointer_args)
          self.original_functions.append(f)
          gen_cmd = f.GetInfo('gen_cmd')
          if gen_cmd == True or gen_cmd == None:
            self.AddFunction(f)
          f.type_handler.AddImmediateFunction(self, f)
          f.type_handler.AddBucketFunction(self, f)

    self.Log("Auto Generated Functions : %d" %
             len([f for f in self.functions if f.can_auto_generate or
                  (not f.IsType('') and not f.IsType('Custom') and
                   not f.IsType('Todo'))]))
    funcs = [f for f in self.functions if not f.can_auto_generate and
             (f.IsType('') or f.IsType('Custom') or f.IsType('Todo'))]
    self.Log("Non Auto Generated Functions: %d" % len(funcs))
    for f in funcs:
      self.Log(" %-10s %-20s gl%s" % (f.info.type, f.return_type, f.name))
def WriteCommandIds(self, filename):
"""Writes the command buffer format"""
file = CHeaderWriter(filename)
file.Write("#define GLES2_COMMAND_LIST(OP) \\\n")
id = 256
for func in self.functions:
file.Write(" %-60s /* %d */ \\\n" %
("OP(%s)" % func.name, id))
id += 1
file.Write("\n")
file.Write("enum CommandId {\n")
file.Write(" kStartPoint = cmd::kLastCommonId, "
"// All GLES2 commands start after this.\n")
file.Write("#define GLES2_CMD_OP(name) k ## name,\n")
file.Write(" GLES2_COMMAND_LIST(GLES2_CMD_OP)\n")
file.Write("#undef GLES2_CMD_OP\n")
file.Write(" kNumCommands\n")
file.Write("};\n")
file.Write("\n")
file.Close()
  def WriteFormat(self, filename):
    """Writes the command buffer format"""
    file = CHeaderWriter(filename)
    for func in self.functions:
      # 'if True' preserves the indentation of the disabled filter below.
      if True:
      #gen_cmd = func.GetInfo('gen_cmd')
      #if gen_cmd == True or gen_cmd == None:
        func.WriteStruct(file)
    file.Write("\n")
    file.Close()

  def WriteDocs(self, filename):
    """Writes the command buffer doc version of the commands"""
    file = CWriter(filename)
    for func in self.functions:
      if True:
      #gen_cmd = func.GetInfo('gen_cmd')
      #if gen_cmd == True or gen_cmd == None:
        func.WriteDocs(file)
    file.Write("\n")
    file.Close()

  def WriteFormatTest(self, filename):
    """Writes the command buffer format test."""
    file = CHeaderWriter(
      filename,
      "// This file contains unit tests for gles2 commmands\n"
      "// It is included by gles2_cmd_format_test.cc\n"
      "\n")
    for func in self.functions:
      if True:
      #gen_cmd = func.GetInfo('gen_cmd')
      #if gen_cmd == True or gen_cmd == None:
        func.WriteFormatTest(file)
    file.Close()

  def WriteCmdHelperHeader(self, filename):
    """Writes the gles2 command helper."""
    file = CHeaderWriter(filename)
    for func in self.functions:
      if True:
      #gen_cmd = func.GetInfo('gen_cmd')
      #if gen_cmd == True or gen_cmd == None:
        func.WriteCmdHelper(file)
    file.Close()
  def WriteServiceContextStateHeader(self, filename):
    """Writes the service context state header (EnableFlags + state fields)."""
    file = CHeaderWriter(
      filename,
      "// It is included by context_state.h\n")
    file.Write("struct EnableFlags {\n")
    file.Write(" EnableFlags();\n")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" bool %s;\n" % capability['name'])
    file.Write("};\n\n")

    # Keys sorted so the generated header is stable across runs.
    for state_name in sorted(_STATES.keys()):
      state = _STATES[state_name]
      for item in state['states']:
        file.Write("%s %s;\n" % (item['type'], item['name']))
    file.Write("\n")
    file.Close()

  def WriteClientContextStateHeader(self, filename):
    """Writes the client context state header (EnableFlags only)."""
    file = CHeaderWriter(
      filename,
      "// It is included by client_context_state.h\n")
    file.Write("struct EnableFlags {\n")
    file.Write(" EnableFlags();\n")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" bool %s;\n" % capability['name'])
    file.Write("};\n\n")
    file.Close()
  def WriteContextStateGetters(self, file, class_name):
    """Writes GetStateAsGLint/GetStateAsGLfloat for |class_name|.

    Emits one switch over pname per GL type; each case reports how many
    values it writes and casts the stored state to the requested type.
    """
    for gl_type in ["GLint", "GLfloat"]:
      file.Write("""
bool %s::GetStateAs%s(
GLenum pname, %s* params, GLsizei* num_written) const {
switch (pname) {
""" % (class_name, gl_type, gl_type))
      for state_name in _STATES.keys():
        state = _STATES[state_name]
        if 'enum' in state:
          # One enum covering several packed values (e.g. a color).
          file.Write(" case %s:\n" % state['enum'])
          file.Write(" *num_written = %d;\n" % len(state['states']))
          file.Write(" if (params) {\n")
          for ndx,item in enumerate(state['states']):
            file.Write(" params[%d] = static_cast<%s>(%s);\n" %
                       (ndx, gl_type, item['name']))
          file.Write(" }\n")
          file.Write(" return true;\n")
        else:
          # One enum per individual state value.
          for item in state['states']:
            file.Write(" case %s:\n" % item['enum'])
            file.Write(" *num_written = 1;\n")
            file.Write(" if (params) {\n")
            file.Write(" params[0] = static_cast<%s>(%s);\n" %
                       (gl_type, item['name']))
            file.Write(" }\n")
            file.Write(" return true;\n")
      for capability in _CAPABILITY_FLAGS:
        file.Write(" case GL_%s:\n" % capability['name'].upper())
        file.Write(" *num_written = 1;\n")
        file.Write(" if (params) {\n")
        file.Write(
          " params[0] = static_cast<%s>(enable_flags.%s);\n" %
          (gl_type, capability['name']))
        file.Write(" }\n")
        file.Write(" return true;\n")
      file.Write(""" default:
return false;
}
}
""")
  def WriteServiceContextStateImpl(self, filename):
    """Writes the context state service implementation."""
    file = CHeaderWriter(
      filename,
      "// It is included by context_state.cc\n")

    # EnableFlags constructor: initializer list from capability defaults.
    code = []
    for capability in _CAPABILITY_FLAGS:
      code.append("%s(%s)" %
                  (capability['name'],
                   ('false', 'true')['default' in capability]))
    file.Write("ContextState::EnableFlags::EnableFlags()\n : %s {\n}\n" %
               ",\n ".join(code))
    file.Write("\n")

    file.Write("void ContextState::Initialize() {\n")
    for state_name in sorted(_STATES.keys()):
      state = _STATES[state_name]
      for item in state['states']:
        file.Write(" %s = %s;\n" % (item['name'], item['default']))
    file.Write("}\n")

    file.Write("""
void ContextState::InitCapabilities() const {
""")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" EnableDisable(GL_%s, enable_flags.%s);\n" %
                 (capability['name'].upper(), capability['name']))
    file.Write("""}
void ContextState::InitState() const {
""")
    # We need to sort the keys so the expectations match
    for state_name in sorted(_STATES.keys()):
      state = _STATES[state_name]
      if state['type'] == 'FrontBack':
        # Pairs of values emitted as one call each for GL_FRONT / GL_BACK.
        num_states = len(state['states'])
        for ndx, group in enumerate(Grouper(num_states / 2, state['states'])):
          args = []
          for item in group:
            args.append('%s' % item['name'])
          file.Write(
            " gl%s(%s, %s);\n" %
            (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
      else:
        args = []
        for item in state['states']:
          args.append('%s' % item['name'])
        file.Write(" gl%s(%s);\n" % (state['func'], ", ".join(args)))
    file.Write("}\n")

    file.Write("""bool ContextState::GetEnabled(GLenum cap) const {
switch (cap) {
""")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" case GL_%s:\n" % capability['name'].upper())
      file.Write(" return enable_flags.%s;\n" % capability['name'])
    file.Write(""" default:
GPU_NOTREACHED();
return false;
}
}
""")
    self.WriteContextStateGetters(file, "ContextState")
    file.Close()
  def WriteClientContextStateImpl(self, filename):
    """Writes the context state client side implementation."""
    file = CHeaderWriter(
      filename,
      "// It is included by client_context_state.cc\n")

    # EnableFlags constructor: initializer list from capability defaults.
    code = []
    for capability in _CAPABILITY_FLAGS:
      code.append("%s(%s)" %
                  (capability['name'],
                   ('false', 'true')['default' in capability]))
    file.Write(
      "ClientContextState::EnableFlags::EnableFlags()\n : %s {\n}\n" %
      ",\n ".join(code))
    file.Write("\n")

    # SetCapabilityState reports via |changed| whether the flag flipped.
    file.Write("""
bool ClientContextState::SetCapabilityState(
GLenum cap, bool enabled, bool* changed) {
*changed = false;
switch (cap) {
""")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" case GL_%s:\n" % capability['name'].upper())
      file.Write(""" if (enable_flags.%(name)s != enabled) {
*changed = true;
enable_flags.%(name)s = enabled;
}
return true;
""" % capability)
    file.Write(""" default:
return false;
}
}
""")
    file.Write("""bool ClientContextState::GetEnabled(
GLenum cap, bool* enabled) const {
switch (cap) {
""")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" case GL_%s:\n" % capability['name'].upper())
      file.Write(" *enabled = enable_flags.%s;\n" % capability['name'])
      file.Write(" return true;\n")
    file.Write(""" default:
return false;
}
}
""")
    file.Close()
  def WriteServiceImplementation(self, filename):
    """Writes the service decoder implementation."""
    file = CHeaderWriter(
      filename,
      "// It is included by gles2_cmd_decoder.cc\n")
    for func in self.functions:
      # 'if True' preserves the indentation of the disabled filter below.
      if True:
      #gen_cmd = func.GetInfo('gen_cmd')
      #if gen_cmd == True or gen_cmd == None:
        func.WriteServiceImplementation(file)

    file.Write("""
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
""")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" case GL_%s:\n" % capability['name'].upper())
      if 'state_flag' in capability:
        # Capabilities with a dirty flag return false so the caller knows
        # the GL call is deferred until the flag is processed.
        file.Write(""" if (state_.enable_flags.%(name)s != enabled) {
state_.enable_flags.%(name)s = enabled;
%(state_flag)s = true;
}
return false;
""" % capability)
      else:
        file.Write(""" state_.enable_flags.%(name)s = enabled;
return true;
""" % capability)
    file.Write(""" default:
NOTREACHED();
return false;
}
}
""")
    file.Close()
  def WriteServiceUnitTests(self, filename):
    """Writes the service decoder unit tests.

    Tests are split across several numbered files (filename must contain a
    %d placeholder); file 0 holds the shared setup expectations.
    """
    num_tests = len(self.functions)
    FUNCTIONS_PER_FILE = 98 # hard code this so it doesn't change.
    count = 0
    for test_num in range(0, num_tests, FUNCTIONS_PER_FILE):
      count += 1
      name = filename % count
      file = CHeaderWriter(
        name,
        "// It is included by gles2_cmd_decoder_unittest_%d.cc\n" % count)
      file.SetFileNum(count)
      end = test_num + FUNCTIONS_PER_FILE
      if end > num_tests:
        end = num_tests
      for idx in range(test_num, end):
        func = self.functions[idx]
        if True:
        #gen_cmd = func.GetInfo('gen_cmd')
        #if gen_cmd == True or gen_cmd == None:
          # Explicit '== False' check: None means "not specified", which
          # still generates a test.
          if func.GetInfo('unit_test') == False:
            file.Write("// TODO(gman): %s\n" % func.name)
          else:
            func.WriteServiceUnitTest(file)
      file.Close()

    # File 0: shared init expectations used by the test base class.
    file = CHeaderWriter(
      filename % 0,
      "// It is included by gles2_cmd_decoder_unittest_base.cc\n")
    file.Write(
"""void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations() {
""")
    for capability in _CAPABILITY_FLAGS:
      file.Write(" ExpectEnableDisable(GL_%s, %s);\n" %
                 (capability['name'].upper(),
                  ('false', 'true')['default' in capability]))
    file.Write("""}
void GLES2DecoderTestBase::SetupInitStateExpectations() {
""")
    # We need to sort the keys so the expectations match
    for state_name in sorted(_STATES.keys()):
      state = _STATES[state_name]
      if state['type'] == 'FrontBack':
        num_states = len(state['states'])
        for ndx, group in enumerate(Grouper(num_states / 2, state['states'])):
          args = []
          for item in group:
            if 'expected' in item:
              args.append(item['expected'])
            else:
              args.append(item['default'])
          file.Write(
            " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
            (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
          file.Write(" .Times(1)\n")
          file.Write(" .RetiresOnSaturation();\n")
      else:
        args = []
        for item in state['states']:
          if 'expected' in item:
            args.append(item['expected'])
          else:
            args.append(item['default'])
        file.Write(" EXPECT_CALL(*gl_, %s(%s))\n" %
                   (state['func'], ", ".join(args)))
        file.Write(" .Times(1)\n")
        file.Write(" .RetiresOnSaturation();\n")
    file.Write("""}
""")
    file.Close()
  def WriteGLES2Header(self, filename):
    """Writes the GLES2 header."""
    file = CHeaderWriter(
      filename,
      "// This file contains Chromium-specific GLES2 declarations.\n\n")
    for func in self.original_functions:
      func.WriteGLES2Header(file)
    file.Write("\n")
    file.Close()

  def WriteGLES2CLibImplementation(self, filename):
    """Writes the GLES2 c lib implementation."""
    file = CHeaderWriter(
      filename,
      "// These functions emulate GLES2 over command buffers.\n")
    for func in self.original_functions:
      func.WriteGLES2CLibImplementation(file)
    # Name -> function pointer table used for GL entry point lookup.
    file.Write("""
namespace gles2 {
NameToFunc g_gles2_function_table[] = {
""")
    for func in self.original_functions:
      file.Write(
        ' { "gl%s", reinterpret_cast<GLES2FunctionPointer>(gl%s), },\n' %
        (func.name, func.name))
    file.Write(""" { NULL, NULL, },
};
} // namespace gles2
""")
    file.Close()

  def WriteGLES2InterfaceHeader(self, filename):
    """Writes the GLES2 interface header."""
    file = CHeaderWriter(
      filename,
      "// This file is included by gles2_interface.h to declare the\n"
      "// GL api functions.\n")
    for func in self.original_functions:
      func.WriteGLES2InterfaceHeader(file)
    file.Close()

  def WriteGLES2InterfaceStub(self, filename):
    """Writes the GLES2 interface stub header."""
    file = CHeaderWriter(
      filename,
      "// This file is included by gles2_interface_stub.h.\n")
    for func in self.original_functions:
      func.WriteGLES2InterfaceStub(file)
    file.Close()

  def WriteGLES2InterfaceStubImpl(self, filename):
    """Writes the GLES2 interface stub implementation."""
    file = CHeaderWriter(
      filename,
      "// This file is included by gles2_interface_stub.cc.\n")
    for func in self.original_functions:
      func.WriteGLES2InterfaceStubImpl(file)
    file.Close()
  def WriteGLES2ImplementationHeader(self, filename):
    """Writes the GLES2 Implementation header."""
    file = CHeaderWriter(
      filename,
      "// This file is included by gles2_implementation.h to declare the\n"
      "// GL api functions.\n")
    for func in self.original_functions:
      func.WriteGLES2ImplementationHeader(file)
    file.Close()

  def WriteGLES2Implementation(self, filename):
    """Writes the GLES2 Implementation."""
    file = CHeaderWriter(
      filename,
      "// This file is included by gles2_implementation.cc to define the\n"
      "// GL api functions.\n")
    for func in self.original_functions:
      func.WriteGLES2Implementation(file)
    file.Close()

  def WriteGLES2ImplementationUnitTests(self, filename):
    """Writes the GLES2 implementation unit tests."""
    file = CHeaderWriter(
      filename,
      "// This file is included by gles2_implementation.h to declare the\n"
      "// GL api functions.\n")
    for func in self.original_functions:
      func.WriteGLES2ImplementationUnitTest(file)
    file.Close()
  def WriteServiceUtilsHeader(self, filename):
    """Writes the gles2 auto generated utility header (validator members)."""
    file = CHeaderWriter(filename)
    for enum in sorted(_ENUM_LISTS.keys()):
      file.Write("ValueValidator<%s> %s;\n" %
                 (_ENUM_LISTS[enum]['type'], ToUnderscore(enum)))
    file.Write("\n")
    file.Close()

  def WriteServiceUtilsImplementation(self, filename):
    """Writes the gles2 auto generated utility implementation.

    Emits one static table of valid values per non-empty enum list, then a
    Validators constructor whose initializer list wires the tables up.
    """
    file = CHeaderWriter(filename)
    enums = sorted(_ENUM_LISTS.keys())
    for enum in enums:
      if len(_ENUM_LISTS[enum]['valid']) > 0:
        file.Write("static %s valid_%s_table[] = {\n" %
                   (_ENUM_LISTS[enum]['type'], ToUnderscore(enum)))
        for value in _ENUM_LISTS[enum]['valid']:
          file.Write(" %s,\n" % value)
        file.Write("};\n")
        file.Write("\n")
    file.Write("Validators::Validators()\n")
    # Initializer-list punctuation: ': ' before the first entry, ',' between
    # entries, ' {' after the last one.
    pre = ': '
    post = ','
    for count, enum in enumerate(enums):
      if count + 1 == len(enums):
        post = ' {'
      if len(_ENUM_LISTS[enum]['valid']) > 0:
        code = """ %(pre)s%(name)s(
valid_%(name)s_table, arraysize(valid_%(name)s_table))%(post)s
"""
      else:
        code = """ %(pre)s%(name)s()%(post)s
"""
      file.Write(code % {
        'name': ToUnderscore(enum),
        'pre': pre,
        'post': post,
      })
      pre = ' '
    file.Write("}\n\n");
    file.Close()
  def WriteCommonUtilsHeader(self, filename):
    """Writes the gles2 common utility header (GetString* declarations)."""
    file = CHeaderWriter(filename)
    enums = sorted(_ENUM_LISTS.keys())
    for enum in enums:
      if _ENUM_LISTS[enum]['type'] == 'GLenum':
        file.Write("static std::string GetString%s(uint32 value);\n" % enum)
    file.Write("\n")
    file.Close()

  def WriteCommonUtilsImpl(self, filename):
    """Writes the gles2 common utility implementation.

    Scrapes GL_* #defines from the GLES2 headers to build a value->name
    table, then emits per-enum GetString* helpers.
    """
    enum_re = re.compile(r'\#define\s+(GL_[a-zA-Z0-9_]+)\s+([0-9A-Fa-fx]+)')
    # NOTE(review): 'dict' shadows the Python builtin; left unchanged here.
    dict = {}
    for fname in ['../../third_party/khronos/GLES2/gl2.h',
                  '../../third_party/khronos/GLES2/gl2ext.h',
                  '../../gpu/GLES2/gl2chromium.h',
                  '../../gpu/GLES2/gl2extchromium.h']:
      lines = open(fname).readlines()
      for line in lines:
        m = enum_re.match(line)
        if m:
          name = m.group(1)
          value = m.group(2)
          # Skip 64-bit-looking values; first name seen for a value wins.
          if len(value) <= 10 and not value in dict:
            dict[value] = name

    file = CHeaderWriter(filename)
    file.Write("static GLES2Util::EnumToString enum_to_string_table[] = {\n")
    for value in dict:
      file.Write(' { %s, "%s", },\n' % (value, dict[value]))
    file.Write("""};
const GLES2Util::EnumToString* GLES2Util::enum_to_string_table_ =
enum_to_string_table;
const size_t GLES2Util::enum_to_string_table_len_ =
sizeof(enum_to_string_table) / sizeof(enum_to_string_table[0]);
""")

    enums = sorted(_ENUM_LISTS.keys())
    for enum in enums:
      if _ENUM_LISTS[enum]['type'] == 'GLenum':
        file.Write("std::string GLES2Util::GetString%s(uint32 value) {\n" %
                   enum)
        if len(_ENUM_LISTS[enum]['valid']) > 0:
          file.Write(" static EnumToString string_table[] = {\n")
          for value in _ENUM_LISTS[enum]['valid']:
            file.Write(' { %s, "%s" },\n' % (value, value))
          file.Write(""" };
return GLES2Util::GetQualifiedEnumString(
string_table, arraysize(string_table), value);
}
""")
        else:
          file.Write(""" return GLES2Util::GetQualifiedEnumString(
NULL, 0, value);
}
""")
    file.Close()
  def WritePepperGLES2Interface(self, filename, dev):
    """Writes the Pepper OpenGLES interface definition.

    Args:
      filename: path of the header to generate.
      dev: if True, generate the dev interfaces (which include the stable
           header); otherwise generate the stable interface with GL typedefs.
    """
    file = CHeaderWriter(
      filename,
      "// OpenGL ES interface.\n",
      2)
    file.Write("#include \"ppapi/c/pp_resource.h\"\n")
    if dev:
      file.Write("#include \"ppapi/c/ppb_opengles2.h\"\n\n")
    else:
      # Guard the GL typedefs so they don't clash with a real gl2.h.
      file.Write("\n#ifndef __gl2_h_\n")
      for (k, v) in _GL_TYPES.iteritems():
        file.Write("typedef %s %s;\n" % (v, k))
      file.Write("#endif // __gl2_h_\n\n")
    for interface in self.pepper_interfaces:
      if interface.dev != dev:
        continue
      file.Write("#define %s_1_0 \"%s;1.0\"\n" %
                 (interface.GetInterfaceName(), interface.GetInterfaceString()))
      file.Write("#define %s %s_1_0\n" %
                 (interface.GetInterfaceName(), interface.GetInterfaceName()))
      # Each interface is a struct of function pointers taking a
      # PP_Resource context as their first argument.
      file.Write("\nstruct %s {\n" % interface.GetStructName())
      for func in self.original_functions:
        if not func.InPepperInterface(interface):
          continue
        original_arg = func.MakeTypedOriginalArgString("")
        context_arg = "PP_Resource context"
        if len(original_arg):
          arg = context_arg + ", " + original_arg
        else:
          arg = context_arg
        file.Write(" %s (*%s)(%s);\n" % (func.return_type, func.name, arg))
      file.Write("};\n\n")
    file.Close()
  def WritePepperGLES2Implementation(self, filename):
    """Writes the Pepper OpenGLES interface implementation.

    Each generated wrapper resolves the PP_Resource context to a
    GLES2Implementation and forwards the call.
    """
    file = CWriter(filename)
    file.Write(_LICENSE)
    file.Write(_DO_NOT_EDIT_WARNING)
    file.Write("#include \"ppapi/shared_impl/ppb_opengles2_shared.h\"\n\n")
    file.Write("#include \"base/logging.h\"\n")
    file.Write("#include \"gpu/command_buffer/client/gles2_implementation.h\"\n")
    file.Write("#include \"ppapi/shared_impl/ppb_graphics_3d_shared.h\"\n")
    file.Write("#include \"ppapi/thunk/enter.h\"\n\n")
    file.Write("namespace ppapi {\n\n")
    file.Write("namespace {\n\n")
    file.Write("gpu::gles2::GLES2Implementation*"
               " GetGLES(PP_Resource context) {\n")
    file.Write(" thunk::EnterResource<thunk::PPB_Graphics3D_API>"
               " enter_g3d(context, false);\n")
    file.Write(" DCHECK(enter_g3d.succeeded());\n")
    file.Write(" return static_cast<PPB_Graphics3D_Shared*>"
               "(enter_g3d.object())->gles2_impl();\n")
    file.Write("}\n\n")
    for func in self.original_functions:
      if not func.InAnyPepperExtension():
        continue
      original_arg = func.MakeTypedOriginalArgString("")
      context_arg = "PP_Resource context_id"
      if len(original_arg):
        arg = context_arg + ", " + original_arg
      else:
        arg = context_arg
      file.Write("%s %s(%s) {\n" % (func.return_type, func.name, arg))
      return_str = "" if func.return_type == "void" else "return "
      file.Write(" %sGetGLES(context_id)->%s(%s);\n" %
                 (return_str, func.original_name,
                  func.MakeOriginalArgString("")))
      file.Write("}\n\n")
    file.Write("} // namespace\n")
    # One Get*Interface() accessor per Pepper interface, returning a static
    # struct whose members point at the wrappers generated above.
    for interface in self.pepper_interfaces:
      file.Write("const %s* PPB_OpenGLES2_Shared::Get%sInterface() {\n" %
                 (interface.GetStructName(), interface.GetName()))
      file.Write(" static const struct %s "
                 "ppb_opengles2 = {\n" % interface.GetStructName())
      file.Write(" &")
      file.Write(",\n &".join(
        f.name for f in self.original_functions
        if f.InPepperInterface(interface)))
      file.Write("\n")
      file.Write(" };\n")
      file.Write(" return &ppb_opengles2;\n")
      file.Write("}\n")
    file.Write("} // namespace ppapi\n")
    file.Close()
  def WriteGLES2ToPPAPIBridge(self, filename):
    """Connects GLES2 helper library to PPB_OpenGLES2 interface.

    Emits gl* entry points that look up the current PPAPI context and
    dispatch through the appropriate interface struct.
    """
    file = CWriter(filename)
    file.Write(_LICENSE)
    file.Write(_DO_NOT_EDIT_WARNING)
    file.Write("#ifndef GL_GLEXT_PROTOTYPES\n")
    file.Write("#define GL_GLEXT_PROTOTYPES\n")
    file.Write("#endif\n")
    file.Write("#include <GLES2/gl2.h>\n")
    file.Write("#include <GLES2/gl2ext.h>\n")
    file.Write("#include \"ppapi/lib/gl/gles2/gl2ext_ppapi.h\"\n\n")
    for func in self.original_functions:
      if not func.InAnyPepperExtension():
        continue
      # Functions without a 'pepper_interface' entry map to the nameless
      # core interface ('').
      interface = self.interface_info[func.GetInfo('pepper_interface') or '']
      file.Write("%s GL_APIENTRY gl%s(%s) {\n" %
                 (func.return_type, func.name,
                  func.MakeTypedOriginalArgString("")))
      return_str = "" if func.return_type == "void" else "return "
      interface_str = "glGet%sInterfacePPAPI()" % interface.GetName()
      original_arg = func.MakeOriginalArgString("")
      context_arg = "glGetCurrentContextPPAPI()"
      if len(original_arg):
        arg = context_arg + ", " + original_arg
      else:
        arg = context_arg
      if interface.GetName():
        # Extension interfaces may be absent at runtime: null-check, and
        # return 0 if the interface is unavailable.
        file.Write(" const struct %s* ext = %s;\n" %
                   (interface.GetStructName(), interface_str))
        file.Write(" if (ext)\n")
        file.Write(" %sext->%s(%s);\n" %
                   (return_str, func.name, arg))
        if return_str:
          file.Write(" %s0;\n" % return_str)
      else:
        file.Write(" %s%s->%s(%s);\n" %
                   (return_str, interface_str, func.name, arg))
      file.Write("}\n\n")
    file.Close()
def main(argv):
  """Parses options and writes all generated command buffer files."""
  parser = OptionParser()
  parser.add_option(
    "-g", "--generate-implementation-templates", action="store_true",
    help="generates files that are generally hand edited..")
  parser.add_option(
    "--alternate-mode", type="choice",
    choices=("ppapi", "chrome_ppapi", "chrome_ppapi_proxy", "nacl_ppapi"),
    help="generate files for other projects. \"ppapi\" will generate ppapi "
    "bindings. \"chrome_ppapi\" generate chrome implementation for ppapi. "
    "\"chrome_ppapi_proxy\" will generate the glue for the chrome IPC ppapi"
    "proxy. \"nacl_ppapi\" will generate NaCl implementation for ppapi")
  parser.add_option(
    "--output-dir",
    help="base directory for resulting files, under chrome/src. default is "
    "empty. Use this if you want the result stored under gen.")
  parser.add_option(
    "-v", "--verbose", action="store_true",
    help="prints more output.")

  (options, args) = parser.parse_args(args=argv)

  # Add in states and capabilities to GLState
  for state_name in sorted(_STATES.keys()):
    state = _STATES[state_name]
    if 'enum' in state:
      _ENUM_LISTS['GLState']['valid'].append(state['enum'])
    else:
      for item in state['states']:
        _ENUM_LISTS['GLState']['valid'].append(item['enum'])
  for capability in _CAPABILITY_FLAGS:
    _ENUM_LISTS['GLState']['valid'].append("GL_%s" % capability['name'].upper())

  # This script lives under gpu/command_buffer, cd to base directory.
  os.chdir(os.path.dirname(__file__) + "/../..")

  gen = GLGenerator(options.verbose)
  gen.ParseGLH("common/GLES2/gl2.h")

  # Support generating files under gen/
  if options.output_dir != None:
    os.chdir(options.output_dir)

  if options.alternate_mode == "ppapi":
    # To trigger this action, do "make ppapi_gles_bindings"
    os.chdir("ppapi");
    gen.WritePepperGLES2Interface("c/ppb_opengles2.h", False)
    gen.WritePepperGLES2Interface("c/dev/ppb_opengles2ext_dev.h", True)
    gen.WriteGLES2ToPPAPIBridge("lib/gl/gles2/gles2.c")
  elif options.alternate_mode == "chrome_ppapi":
    # To trigger this action, do "make ppapi_gles_implementation"
    gen.WritePepperGLES2Implementation(
      "ppapi/shared_impl/ppb_opengles2_shared.cc")
  else:
    # Default mode: write every autogen header under gpu/command_buffer.
    os.chdir("gpu/command_buffer")
    gen.WriteCommandIds("common/gles2_cmd_ids_autogen.h")
    gen.WriteFormat("common/gles2_cmd_format_autogen.h")
    gen.WriteFormatTest("common/gles2_cmd_format_test_autogen.h")
    gen.WriteGLES2InterfaceHeader("client/gles2_interface_autogen.h")
    gen.WriteGLES2InterfaceStub("client/gles2_interface_stub_autogen.h")
    gen.WriteGLES2InterfaceStubImpl(
      "client/gles2_interface_stub_impl_autogen.h")
    gen.WriteGLES2ImplementationHeader("client/gles2_implementation_autogen.h")
    gen.WriteGLES2Implementation("client/gles2_implementation_impl_autogen.h")
    gen.WriteGLES2ImplementationUnitTests(
      "client/gles2_implementation_unittest_autogen.h")
    gen.WriteGLES2CLibImplementation("client/gles2_c_lib_autogen.h")
    gen.WriteCmdHelperHeader("client/gles2_cmd_helper_autogen.h")
    gen.WriteServiceImplementation("service/gles2_cmd_decoder_autogen.h")
    gen.WriteServiceContextStateHeader("service/context_state_autogen.h")
    gen.WriteServiceContextStateImpl("service/context_state_impl_autogen.h")
    gen.WriteClientContextStateHeader("client/client_context_state_autogen.h")
    gen.WriteClientContextStateImpl(
      "client/client_context_state_impl_autogen.h")
    gen.WriteServiceUnitTests("service/gles2_cmd_decoder_unittest_%d_autogen.h")
    gen.WriteServiceUtilsHeader("service/gles2_cmd_validation_autogen.h")
    gen.WriteServiceUtilsImplementation(
      "service/gles2_cmd_validation_implementation_autogen.h")
    gen.WriteCommonUtilsHeader("common/gles2_cmd_utils_autogen.h")
    gen.WriteCommonUtilsImpl("common/gles2_cmd_utils_implementation_autogen.h")
    gen.WriteGLES2Header("../GLES2/gl2chromium_autogen.h")

  if gen.errors > 0:
    print "%d errors" % gen.errors
    return 1
  return 0
# Script entry point: exit status 1 when generation reported errors.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| {
"content_hash": "d1fb944d70e33795276329a703ce97bb",
"timestamp": "",
"source": "github",
"line_count": 7522,
"max_line_length": 81,
"avg_line_length": 31.7420898697155,
"alnum_prop": 0.5977366772210216,
"repo_name": "nacl-webkit/chrome_deps",
"id": "1c2b3929151a4182a449c4a5072b83b1c012495a",
"size": "238764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpu/command_buffer/build_gles2_cmd_buffer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1173441"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "74568368"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "156174457"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3088381"
},
{
"name": "JavaScript",
"bytes": "18179048"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "3044"
},
{
"name": "Objective-C",
"bytes": "6965520"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932725"
},
{
"name": "Python",
"bytes": "8458718"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1526176"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XSLT",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import csv
import urlparse
import sys, os
# Per http://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
# Because csv apparently wants binary data
sys.stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)

mycsv = csv.DictReader(sys.stdin)
# Maps a domain suffix to the list of usernames seen with it.
sites = {}
for data in mycsv:
    # Domain suffixes extracted from this CSV row.
    isites = set()
    warnings = set()
    # Try to extract a hostname from both the Title and the URL fields.
    for d in data['Title'], data['URL']:
        if not d: continue
        # Auto-generated passwords from LastPass that were never renamed
        if d.lower().startswith('generated password for '):
            d = d[len('generated password for '):]
        #if 'sample entry' in d.lower() or d.lower().startswith('sample '):
        #    continue
        # Parenthetical comments trailing the domain name
        if '(' in d:
            d = d[0:d.find('(')].strip()
        # Strip URL components, keep hostname only
        if '://' in d:
            d = urlparse.urlparse(d).netloc
        elif '/' in d:
            d = d[0:d.find('/')].strip()
        # Drop a trailing ':port' (rfind so IPv6-ish strings keep earlier colons)
        if ':' in d:
            d = d[0:d.rfind(':')].strip()
        if not d: continue
        # Skip private IP addresses
        if d.startswith('192.') or d.startswith('10.'):
            warnings.add("Skipping private IP address '%s'" % (d,))
            continue
        # Handle '<realm> <hostname>' entries
        if ' ' in d:
            lw = d[d.rfind(' ')+1:]
            if not d.endswith('.') and '.' in lw:
                warnings.add("Taking last word from name '%s'" % (d,))
                d = lw
            else:
                warnings.add("Not parsing name '%s'" % (d,))
                continue
        c = d.strip().split('.')
        # Better handling of ccTLDs: treat 'co.uk'-style suffixes as one TLD.
        tldsize = 1
        if len(c[-1]) == 2 and c[-2] in ('co', 'com'):
            tldsize = 2
        # Record every suffix of the hostname down to the registrable domain.
        for i in range(len(c)-tldsize):
            s = '.'.join(c[i:])
            isites.add(s)
    if warnings:
        sys.stderr.write('%d warnings, found %d domains\n %s\n' %
                         (len(warnings), len(isites), '\n '.join(warnings)))
    for s in isites:
        if s not in sites:
            sites[s] = []
        sites[s].append(data['Username'])

# Emit one grep-style anchor pattern per domain, sorted by reversed labels.
for site, usernames in sorted(sites.items(), key=lambda x: '.'.join(reversed(x[0].split('.')))):
    print '\\(^\\|\\.\\)%s$' % (site,)
    #print "%s\t%s" % (site, ' '.join(usernames))
| {
"content_hash": "d9ae382c6eb3d8e16ebd0b473f267ba6",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 96,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.5115582191780822,
"repo_name": "nandhp/misc-utils",
"id": "bd581f1bad5f9d154731e2633c4e9dd5d1e1e3ff",
"size": "3028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keepassx_domains.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "11453"
},
{
"name": "Makefile",
"bytes": "1100"
},
{
"name": "Perl",
"bytes": "17669"
},
{
"name": "Python",
"bytes": "22054"
},
{
"name": "Shell",
"bytes": "9043"
}
],
"symlink_target": ""
} |
import codecs
import os
import unittest
import tempfile
from tracing_build import vulcanize_trace_viewer
class Trace2HTMLTests(unittest.TestCase):

  def testWriteHTMLForTracesToFile(self):
    """Vulcanizing the trace viewer into a temporary HTML file succeeds."""
    # Note: We can't use "with" when working with tempfile.NamedTemporaryFile
    # as that does not work on Windows. We use the longer, more clunky version
    # instead. See https://bugs.python.org/issue14243 for details.
    raw_tmpfile = tempfile.NamedTemporaryFile(
        mode='w', suffix='.html', delete=False)
    raw_tmpfile.close()
    # The file is created *before* the try block: in the original, a failure
    # inside NamedTemporaryFile reached the finally clause with raw_tmpfile
    # unbound, raising NameError and masking the real error.
    try:
      with codecs.open(raw_tmpfile.name, 'w', encoding='utf-8') as tmpfile:
        vulcanize_trace_viewer.WriteTraceViewer(tmpfile)
    finally:
      os.remove(raw_tmpfile.name)
| {
"content_hash": "da881bf3f3467a5a1ba919484b806036",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 35.04761904761905,
"alnum_prop": 0.7146739130434783,
"repo_name": "catapult-project/catapult",
"id": "b602388eb4a6e2593bd7ad18db4b616e20031f32",
"size": "903",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tracing/tracing_build/vulcanize_trace_viewer_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the jigsaw documentation.
import os
import sys
# Make the package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath(".."))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'jigsaw'
copyright = '2017, nint8835'
author = 'nint8835'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.3'
# The full version, including alpha/beta/rc tags.
release = '2.1.3'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on language=None and expect 'en';
# confirm the Sphinx version in use before changing.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'jigsawdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'jigsaw.tex', 'jigsaw Documentation',
     'nint8835', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'jigsaw', 'jigsaw Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'jigsaw', 'jigsaw Documentation',
     author, 'jigsaw', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "6371f8c2bb155756cfb637f42bbebc90",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 29.391304347826086,
"alnum_prop": 0.6587771203155819,
"repo_name": "nint8835/jigsaw",
"id": "7ea07c32a483517ae689aa79205ac499bfbfb530",
"size": "4738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22964"
}
],
"symlink_target": ""
} |
from . import cli # noqa
| {
"content_hash": "11d98121143e72c306953a3c770d9279",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 26,
"alnum_prop": 0.6538461538461539,
"repo_name": "lucuma/Clay",
"id": "338df62ccab866e91e459353deb3488ab6885142",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clay/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1255"
},
{
"name": "HTML",
"bytes": "2490"
},
{
"name": "Makefile",
"bytes": "316"
},
{
"name": "Python",
"bytes": "39177"
}
],
"symlink_target": ""
} |
from django.test import TestCase
class PackageTests(TestCase):
    """Sanity checks that the project's core dependencies are installed."""

    def setUp(self):
        # No fixtures needed; kept to mirror the project's test layout.
        pass

    def test_imports(self):
        """Importing each required package must not raise ImportError."""
        import django  # noqa: F401
        import pytz  # noqa: F401
        import rest_framework  # noqa: F401
        import graphene  # noqa: F401
        import graphene_django  # noqa: F401
| {
"content_hash": "e0e40c6802dd690c4de0e7f2a0b75414",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 32,
"avg_line_length": 17.058823529411764,
"alnum_prop": 0.5586206896551724,
"repo_name": "foohooboo/graphql-cookiecutter",
"id": "4468ab26cf0e70bf0f6d27971a3f925eb0e715ab",
"size": "290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphql_cookiecutter/contrib/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10027"
},
{
"name": "HTML",
"bytes": "14605"
},
{
"name": "JavaScript",
"bytes": "1307"
},
{
"name": "Nginx",
"bytes": "992"
},
{
"name": "Python",
"bytes": "64809"
},
{
"name": "Shell",
"bytes": "9821"
}
],
"symlink_target": ""
} |
import sqlite3 as lite
import sys
#from scipy.stats import linregress
from numpy import empty
import math
import datetime
class DataManager:
    """SQLite-backed store for blood-glucose log entries and meter
    calibration points (database file: data.db in the working directory)."""

    def __init__(self):
        self.con = lite.connect('data.db')
        cur = self.con.cursor()
        # Table for storing date, time, blood glucose value, carbs, bolus, and notes
        cur.execute("CREATE TABLE IF NOT EXISTS Data(Id INTEGER PRIMARY KEY, DateColumn date, Bg INT, Carbs INT, Bolus INT, Notes Text)")
        # Table for storing points with which to calibrate the meter.
        cur.execute("CREATE TABLE IF NOT EXISTS CalibData(ADC INT, Actual INT)")

    # Adds a new data point to the "Data" table.
    def new_entry(self, date, bg, carbs, bolus, notes):
        # Parameterized SQL: the original interpolated values straight into
        # the statement, which broke on quotes in notes and was injectable.
        self.con.execute(
            "INSERT INTO Data(DateColumn, Bg, Carbs, Bolus, Notes) VALUES (?, ?, ?, ?, ?)",
            (date, bg, carbs, bolus, notes))
        self.con.commit()

    # Deletes the oldest entry matching every field from the "Data" table.
    def delete_entry(self, date, bg, carbs, bolus, notes):
        self.con.execute(
            "DELETE FROM Data WHERE Id = (SELECT MIN(Id) FROM Data WHERE "
            "DateColumn=? AND Bg=? AND Carbs=? AND Bolus=? AND Notes=?)",
            (date, bg, carbs, bolus, notes))
        self.con.commit()

    # Adds a new data point to the "CalibData" table.
    def new_calib_entry(self, adc, actual):
        self.con.execute("INSERT INTO CalibData(ADC, Actual) VALUES (?, ?)",
                         (adc, actual))
        self.con.commit()

    # Calculates linear regression on the "CalibData" table.
    # Returns the fitted line as a lambda object.
    def get_line(self):
        # Imported locally: the module-level scipy import is commented out,
        # so the original raised NameError on linregress here.
        from scipy.stats import linregress
        rows = self.get_whole_table("CalibData")
        x = empty([len(rows)])
        y = empty([len(rows)])
        for index, row in enumerate(rows):
            x[index] = row["ADC"]
            y[index] = row["Actual"]
        slope, intercept, r_value, p_value, std_err = linregress(x, y)
        return lambda value: slope * value + intercept

    # Returns the requested table as a list of sqlite3.Row mappings.
    # Note: table names cannot be bound as SQL parameters; callers must pass
    # only trusted, hard-coded table names.
    def get_whole_table(self, table):
        with self.con:
            self.con.row_factory = lite.Row
            cur = self.con.cursor()
            cur.execute("SELECT * FROM " + table)
            return cur.fetchall()

    # Returns the requested table ordered by its dateColumn timestamps.
    def get_whole_table_sorted(self, table):
        with self.con:
            self.con.row_factory = lite.Row
            cur = self.con.cursor()
            cur.execute("SELECT * FROM " + table + " ORDER BY datetime(dateColumn)")
            return cur.fetchall()

    # Deletes the sqlite table passed.
    def delete_table(self, table):
        cur = self.con.cursor()
        cur.execute("DROP TABLE IF EXISTS " + table)

    # Returns the parsed timestamps of the "Data" table in chronological order.
    def sort_data_table(self):
        # The original referenced nonexistent "Date"/"Time" columns, passed
        # two arguments to str_to_date, and ended in a truncated statement;
        # it could never run. This version sorts the real DateColumn values.
        data_table = self.get_whole_table('Data')
        datetime_list = [self.str_to_date(str(entry["DateColumn"]))
                         for entry in data_table]
        datetime_list.sort()
        return datetime_list

    # Converts 'm/d/y', 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM' strings to a
    # datetime object. Raises ValueError for unrecognized input (the
    # original fell through to a NameError instead).
    def str_to_date(self, strdate):
        if '/' in strdate:
            m, d, y = [int(part) for part in strdate.split('/')[:3]]
            if y < 100:
                # Two-digit years mean 20xx. (The original concatenated
                # strings, turning e.g. 5 into 205.)
                y += 2000
            return datetime.datetime(year=y, month=m, day=d)
        for fmt in ("%Y-%m-%d %H:%M", "%Y-%m-%d"):
            try:
                return datetime.datetime.strptime(strdate, fmt)
            except ValueError:
                pass
        raise ValueError("unrecognized date string: %r" % (strdate,))
# Testing stuff: ad-hoc smoke run that dumps the current Data table in
# chronological order, then clears it and repopulates it with fixture rows.
if __name__ == "__main__":
    bgm = DataManager()
    rows = bgm.get_whole_table("Data")
    # Fixture rows: (timestamp, blood glucose, carbs, bolus, notes).
    data = (
        ('2016-12-01 11:44', 98, 0, 0, 'bg of 98'),
        ('2016-11-03 03:45', 98, 36, 9, 'bg of 98'),
        ('2016-11-03 12:45', 94, 24, 6, 'same notes'),
        ('2016-11-03 23:45', 112, 26, 7, 'notes these are them'),
        ('2016-10-04 23:45', 86, 13, 3, 'aeu'),
        ('2016-09-05 14:45', 134, 6, 2, 'none'),
        ('2016-10-06 23:45', 99, 6, 2, 'it was 99 today'),
        ('2016-10-07 23:45', 109, 12, 3, 'tomorrow is 140'),
        ('2016-11-08 12:45', 103, 140, 35, 'wow thats high'),
        ('2016-11-09 23:45', 109, 60, 15, 'testing'),
        ('2016-11-10 23:45', 94, 44, 11, '44, 11'),
        ('2016-11-03 18:45', 117, 6, 2, 'notesnotesnotes'),
        ('2016-11-03 19:45', 117, 6, 2, 'notesnotesnotes'),
        ('2016-11-04 12:45', 111, 26, 7, ' '),
        ('2016-11-04 20:45', 117, 6, 2, 'notesnotesnotes'),
        ('2016-11-05 21:45', 111, 26, 7, ' '),
        ('2016-11-05 22:45', 111, 26, 7, ' ')
    )
    #table = bgm.sort_data_table()
    table = bgm.get_whole_table_sorted("Data")
    for thing in table:
        print thing
    # Drop and recreate the table, then load the fixtures.
    bgm.delete_table('Data')
    bgm = DataManager()
    for point in data:
        bgm.new_entry(point[0],point[1],point[2],point[3], point[4])
    #for row in rows:
    #    print "%s %s %s" % (row["Date"], row["Bg"], row["Carbs"])
        #print point[0]
    #bgm.delete_table('calibdata')
    #bgm.new_calib_entry(1, 1)
    #bgm.new_calib_entry(20, -100)
    #test = bgm.get_line()
    #print test(3700)
| {
"content_hash": "7e4033f9243a0c9c67c439a5ca75eb70",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 190,
"avg_line_length": 35.65408805031446,
"alnum_prop": 0.5597107073557946,
"repo_name": "johnyburd/glucometer",
"id": "9f1bf559a11c36e5839144464a191ddf134523f1",
"size": "5792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/data_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36328"
},
{
"name": "Ruby",
"bytes": "6989"
}
],
"symlink_target": ""
} |
import flask
from app import app
@app.route('/')
def index():
    """Render the landing page with no playback action selected."""
    page = flask.render_template('index.html', action="")
    return page
@app.route('/control/<action>')
def control(action):
    """Render the page after validating a playback control *action*.

    Unknown actions produce a 404 response.
    """
    actions = ["prev", "play", "pause", "stop", "next"]
    if action not in actions:
        # flask.abort requires an HTTP status code; the original bare
        # flask.abort() raised TypeError instead of returning a response.
        flask.abort(404)
    return flask.render_template("index.html", action=action)
@app.errorhandler(404)
def internal_error(error):
    """Serve the custom 404 page for unknown routes."""
    body = flask.render_template('404.html')
    return body, 404
| {
"content_hash": "ee2209adb155b5888f8e34af0852d448",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 21.75,
"alnum_prop": 0.6850574712643678,
"repo_name": "hellerbarde/clampdown",
"id": "47805bf59ee7931150fa37fcc6c2b73e55931280",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2001"
},
{
"name": "Shell",
"bytes": "192"
}
],
"symlink_target": ""
} |
"""Command for resetting an instance."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
class Reset(base_classes.NoOutputAsyncMutator):
  """Reset a virtual machine instance."""

  @staticmethod
  def Args(parser):
    """Register the zone flag and the positional instance names."""
    utils.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='reset')
    parser.add_argument(
        'name',
        nargs='+',
        completion_resource='compute.instances',
        help='The names of the instances to reset.')

  @property
  def service(self):
    # All requests go to the instances collection.
    return self.compute.instances

  @property
  def method(self):
    # API verb invoked for every request built below.
    return 'Reset'

  @property
  def resource_type(self):
    return 'instances'

  def CreateRequests(self, args):
    """Build one Reset request per instance named on the command line."""
    requests = []
    for instance_name in args.name:
      ref = self.CreateZonalReference(instance_name, args.zone)
      requests.append(self.messages.ComputeInstancesResetRequest(
          instance=ref.Name(),
          project=self.project,
          zone=ref.zone))
    return requests

  def Display(self, _, resources):
    # Nothing to show for a reset; draining the generator runs the logic
    # that waits for each reset operation to complete.
    for _resource in resources:
      pass
Reset.detailed_help = {
'brief': 'Reset a virtual machine instance',
'DESCRIPTION': """\
*{command}* is used to perform a hard reset on a Google
Compute Engine virtual machine.
""",
}
| {
"content_hash": "ca17a8566b924875ec79ac23ee10284c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 69,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6643312101910828,
"repo_name": "flgiordano/netcash",
"id": "972b53d951f54b72a43583155bcf13f8cf82ab48",
"size": "2165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/surface/compute/instances/reset.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
import json, sys, re, random
from bs4 import BeautifulSoup
from collections import Counter # Bag implementation
from random import choice
from datetime import datetime
import csv
from type_mapping import case_map, organization_map, method_map
from type_mapping import case_delist, method_delist, organization_delist
from type_mapping import case_kill_list, method_kill_list, organization_kill_list
# ISO-style YYYY-MM-DD date prefix (used by guess_type).
DATE_FMT = re.compile(r'\d{4}-\d{2}-\d{2}')
# Whole-string integer and float literals.
INT_FMT = re.compile(r'^\d+$')
FLOAT_FMT = re.compile(r'^\d+\.\d+$')
# Any run of whitespace, for collapsing rich-text previews onto one line.
WHITESPACE_FMT = re.compile(r'\s+')
# A comma outside parentheses: splits top-level comma-delimited lists
# without breaking apart parenthesized sub-phrases.
COMMA_NOT_IN_PARENS = re.compile(r',\s*(?![^()]*\))')
def get_unicode(s):
    """Convert HTML entities and strip tags, returning plain unicode text."""
    # NOTE(review): the original chained .replace('&', '&'), a no-op --
    # presumably intended to collapse a double-escaped ampersand
    # ('&amp;' left in the text after entity decoding). Confirm against
    # the source data.
    return BeautifulSoup(s, 'lxml').get_text().replace('&amp;', '&')
def get_isodate(s):
    """Convert a unix-epoch string to an ISO 8601 timestamp (local time).

    Falsy inputs (None, empty string) are passed through unchanged.
    """
    if not s:
        return s
    stamp = datetime.fromtimestamp(int(s))
    return stamp.isoformat()
# db is an array of dicts read in from JSON
def keys(db):
# use last example because early examples are often incomplete
return choice(db[-1]).keys()
# db is an array of dicts read in from JSON
# returns a Counter instance, which has a useful .most_common(x) method
def values(db, key):
return Counter([u.get(key, 'key error') for u in db])
# db is an array of dicts read in from JSON
def get_by_id(db, id):
for item in db:
if item['id'] == id:
return item
# convert to upgrade a field in all instances
def update(db, key, fn, newkey=None):
if not newkey:
newkey = key
for item in db:
item[newkey] = fn(item.get(key, None))
if key != newkey and item.get(key, None):
del item[key]
# Data Schema work that is needed:
# who_was_primarily_responsible_for_organizing_the_initiative -> ORGREF
# type_of_funding_entity: fixed_list + other
# typical_implementing_entity: should be renamed "type_of_implementing_entity"
# same for typical_supporting_entity
def rename_fields(object, fieldmap):
    """Rename keys of *object* per *fieldmap*, a sequence of (old, new) pairs.

    Raises KeyError if an old name is missing, matching dict indexing.
    """
    for old_name, new_name in fieldmap:
        value = object[old_name]
        object[new_name] = value
        del object[old_name]
def rename_specific_fields(obj):
    """Normalize legacy Drupal-export field names on *obj* to the new schema."""
    rename_fields(obj, (
        ('if_yes_were_they', 'facilitator_type'),
        ('other_facilitation', 'facilitator_type_other'),
        ('other_demographics', 'demographics_other'),
        ('other_public_roles', 'targeted_participants_other'),
        ('other_method_of_recruitment', 'method_of_recruitment_other'),
        ('other_decision_method', 'decision_method_other'),
        ('kind_of_influence','purpose'),
        ('other_intended_purposes','purpose_other'),
        ('geo_street', 'geo_address1'),
        ('additional', 'geo_address2')
    ))
def uprint(text=''):
    # Encode explicitly so non-ASCII text prints even when stdout is piped
    # (Python 2 would otherwise fall back to the ascii codec and raise).
    print text.encode('utf-8')
type_map = None  # key -> expected-type mapping; loaded per input filename before use


def known_type(key):
    """Return the expected (hand-maintained) type name for *key*."""
    return type_map[key]
# Maps the site's verbose voting descriptions to short canonical tokens.
voting_map = {
    None: u'none',
    u'Consensus': u'consensus',
    u'Majoritarian Voting (i.e. 50%+1)': u'majority',
    u'Preferential Voting (i.e. ranking preferences)': 'preferential',
    u'Super-Majoritarian Voting (i.e. threshold more than 50%+1)': 'supermajority'
}
def interpreted_value(obj, key):
    """Return obj[key] coerced according to the type declared in type_map.

    Empty values (per is_empty) become None; voting fields go through the
    fixed voting_map vocabulary.
    """
    expected_type = known_type(key)
    raw_value = obj.get(key, None)
    if key == u'if_voting':
        return voting_map[raw_value]
    if is_empty(raw_value):
        return None
    if expected_type == u'triple':
        # 'triple' fields hold Yes/No/Both-style answers; a comma means
        # several options were ticked, which collapses to 'both'.
        if ',' in raw_value:
            return 'both'
        return raw_value.lower().replace('-', '')
    if expected_type == u'list':
        return list_to_list(raw_value)
    elif expected_type == u'json_dict':
        return json.loads(raw_value)
    elif expected_type == u'html-encoded dict':
        return html_to_list_of_dict(raw_value)
    elif expected_type == u'unicode':
        return get_unicode(raw_value)
    elif expected_type == u'int':
        # Some integer fields arrive as '42.0'; go through float first.
        return int(float(raw_value))
    elif expected_type == u'float':
        return float(raw_value)
    # elif expected_type == u'rich text':
    #     # shorten it for ease of viewing
    #     return WHITESPACE_FMT.sub(' ', raw_value).strip()[:40]
    elif expected_type == u'rich text':
        # NOTE(review): .replace('&', '&') is a no-op as written; this
        # presumably meant to collapse a double-escaped ampersand -- confirm.
        return raw_value.replace('&', '&')
    return raw_value
def interpreted_language_value(obj, key):
    """Map the site's verbose language names in obj[key] to ISO 639-1 codes.

    Returns a list of codes; a missing value defaults to ['en'].
    Raises ValueError when the interpreted value has an unexpected type.
    """
    langs = {
        u'Deutsch - German': 'de',
        u'English': 'en',
        u'Italiano - Italian': 'it',
        u'Español - Spanish': 'es',
        u'Français - French': 'fr',
        u'Português - Portuguese': 'pt'
    }
    lang_val = interpreted_value(obj, key)
    if type(lang_val) == list:
        return [langs[z] for z in lang_val]
    if type(lang_val) == unicode:
        return [langs[lang_val]]
    if lang_val == None:
        return ['en']
    # The original ended with the dead expression `0 / 3` (which evaluates
    # to 0 and is discarded) and then silently returned None. Fail loudly
    # instead so malformed source data is caught here, not downstream.
    raise ValueError('unexpected language value: %r' % (lang_val,))
def only_one(obj, key):
    """Collapse obj[key] to a single value, picking randomly from lists."""
    interpreted = interpreted_value(obj, key)
    if type(interpreted) != list:
        return interpreted
    return choice(interpreted)
def is_empty(value):
    """True for None and for strings that are blank or a literal '{}'/'[]'."""
    if value is None:
        return True
    return type(value) == unicode and value.strip() in (u'', u'{}', u'[]')
def guess_type(key, value):
    """Heuristically classify *value*, returning a type-name string.

    *key* is consulted first for fields whose type is known a priori;
    string values are then sniffed by content.
    """
    t = type(value).__name__
    if key.startswith('body_'):
        return 'rich text'
    if key == u'file_attachments':
        return u'html-encoded dict'
    if t in (u'str', u'unicode'):
        if not value.strip():
            return u'empty'
        if value in (u'{}', u'[]'):
            return u'empty'
        if value[0] == '{':
            return u'json_dict'
        if value[0] == '[':
            return u'comma-delimited list'  # really a json list
        if DATE_FMT.match(value):
            return u'ISO 8601 date'
        if INT_FMT.match(value):
            return u'int'
        if FLOAT_FMT.match(value):
            return u'float'
        if u'<p>' in value or u'<div' in value:
            return u'rich text'
        if value in (u'Yes', u'No'):
            return 'boolean'
        if re.search(COMMA_NOT_IN_PARENS, value):
            return 'comma-delimited list'
        if u'°' in value:
            # A degree sign strongly suggests a coordinate pair.
            return 'lat/long'
    # Fall back to the raw Python type name for non-strings and
    # unremarkable strings.
    return t
def best_of(type1, type2):
    """Reconcile two type guesses for the same field into one.

    'empty' defers to anything; list, lat/long and rich-text guesses
    dominate plain strings; numeric guesses defer to string guesses.
    Raises when the guesses cannot be reconciled.
    """
    lo, hi = sorted([type1, type2])
    if lo == hi:
        return lo
    if lo == u'empty':
        return hi
    if hi == u'empty':
        return lo
    if lo == u'comma-delimited list':
        return lo
    if lo in (u'int', u'float') and hi in (u'str', u'unicode'):
        return hi
    if lo in (u'lat/long', u'rich text'):
        return lo
    raise Exception(u'Iconsistent types: {} != {}'.format(lo, hi))
def html_to_list_of_dict(value):
    """Parse file-attachment markup into dicts of path, byte length, title.

    Each attachment is a <span> wrapping an <a href=... type='...;length=N'>.
    """
    markup = BeautifulSoup(value, 'lxml')
    attachments = []
    for span in markup.find_all(u'span'):
        anchor = span.a
        attachments.append({
            u'path': anchor[u'href'],
            u'length': int(anchor[u'type'].split('=')[-1]),
            u'title': ''.join(list(span.stripped_strings)),
        })
    return attachments
def list_to_list(value):
    """Coerce a raw field value into a Python list.

    JSON arrays are parsed; comma-delimited strings are split on commas
    outside parentheses; attachment markup is delegated to
    html_to_list_of_dict; anything else is wrapped in a one-element list.
    """
    if value and value[0] == '[':
        return json.loads(value)
    if value and re.search(COMMA_NOT_IN_PARENS, value):
        return [x.strip() for x in re.split(COMMA_NOT_IN_PARENS, value.strip(u'[]'))]
    if value and '<span' in value:
        # Bug fix: the original called html_dict_to_list_of_dict, which is
        # not defined anywhere in this module (NameError when reached); the
        # defined helper is html_to_list_of_dict.
        return html_to_list_of_dict(value)
    return [value]
def count_record(results, prefix, record):
    """Accumulate per-field usage statistics for *record* into *results*.

    Keys are namespaced with *prefix* so nested structures can be
    flattened; nested dicts recurse with a longer prefix.
    """
    for shortkey,value in record.items():
        key = prefix + shortkey
        # Lazily create the per-key summary on first sight.
        result = results.setdefault(key, {
            u'count': 0,
            u'type': 'empty',
            u'examples': set(),
            u'array_values': []
        })
        value_type = guess_type(shortkey, value)
        if value and value_type != u'empty':
            if value_type == u'json_dict':
                # NOTE(review): json_dict_to_dict is not defined in this
                # module; this branch raises NameError if ever reached.
                count_record(results, u"{}{}.".format(prefix, key), json_dict_to_dict(value))
            result[u'count'] += 1
            result[u'type'] = best_of(result[u'type'], value_type)
            if value_type == u'rich text':
                # Keep only a short, whitespace-collapsed preview.
                result[u'examples'].add(WHITESPACE_FMT.sub(' ', value).strip()[:40])
            elif value_type == u'html-encoded dict':
                # Recurse into each attachment dict and remember it verbatim.
                for span_value in html_to_list_of_dict(value):
                    count_record(results, u"{}{}.".format(prefix, key), span_value)
                    result[u'examples'].add(json.dumps(span_value))
            elif value_type == u'comma-delimited list':
                arrv = list_to_list(value)
                if type(arrv) != list:
                    # Deliberate crash: this name does not exist, so hitting
                    # it aborts loudly if list_to_list ever misbehaves.
                    die_violently_in_a_fire()
                for val in arrv:
                    result[u'examples'].add(val)
                result[u'array_values'].append(sorted(arrv))
            else:
                result[u'array_values'].append(value)
def stats(struct):
    """Aggregate per-key usage statistics over every record in *struct*."""
    total = len(struct)
    results = {}
    # populate results
    for record in struct:
        count_record(results, u'', record)
    # Convert raw counts to percentages and freeze example sets as lists.
    for summary in results.values():
        summary[u'percent'] = summary[u'count'] * 100 // total
        summary[u'examples'] = list(summary[u'examples'])
    return results
def jsonify(filename):
    """Load and return the JSON document stored at *filename*.

    Uses a context manager so the file handle is closed promptly (the
    original left the handle open for the garbage collector to reap).
    """
    with open(filename) as handle:
        return json.load(handle)
def print_header(text):
    """Print *text* boxed in '#' characters, followed by a blank line."""
    width = len(text) + 4
    uprint(u'#' * width)
    uprint(u'#{}#'.format(' ' * (width - 2)))
    uprint(u'# {} #'.format(text))
    uprint(u'#{}#'.format(' ' * (width - 2)))
    uprint(u'#' * width)
    uprint()
def print_stats(filename, results):
    """Pretty-print the per-key usage statistics gathered by stats()."""
    print_header('{} tags'.format(filename))
    for key in sorted(results.keys()):
        value = results[key]
        uprint('"{}":'.format(key))
        uprint(u"\tused {}% of the time ({} total uses)".format(value[u'percent'], value[u'count']))
        uprint(u"\tbest guess of type: {}".format(value[u'type']))
        examples = value[u'examples']
        if examples:
            if len(examples) > 8:
                # Too many uniques to list: show the share plus three
                # random samples (duplicates possible).
                uprint(u'\t{} uniques examples ({}%):'.format(len(examples), len(examples) * 100 // value[u'count']))
                uprint(u"\t\t{}".format(random.choice(examples)))
                uprint(u"\t\t{}".format(random.choice(examples)))
                uprint(u"\t\t{}".format(random.choice(examples)))
            else:
                uprint(u'\tchoices:')
                for example in examples:
                    uprint(u"\t\t{}".format(example))
        uprint()
def print_under_ten_percent(filename, results):
    """Report keys that are used, but on fewer than 10% of records."""
    print_header('{} tags'.format(filename))
    for key in sorted(results):
        summary = results[key]
        if 0 < summary[u'percent'] < 10:
            uprint(u'"{}" used {}% of the time ({} total uses)'.format(
                key, summary[u'percent'], summary[u'count']))
def print_zero_usage(filename, results):
    """Report keys that never appear with a non-empty value."""
    print_header('{} tags'.format(filename))
    for key in sorted(results):
        summary = results[key]
        if summary[u'percent'] != 0:
            continue
        uprint(u'"{}" used {}% of the time ({} total uses)'.format(
            key, summary[u'percent'], summary[u'count']))
def print_conversion_mapping(filename, results):
    """Emit a skeleton key -> action mapping for migrating *filename*.

    Never-used keys are marked 'kill'; rarely-used keys 'tbd'; list-valued
    keys 'external_table'; everything else maps to its guessed type.
    """
    print_header('{} tags'.format(filename))
    uprint(u'{')
    for key in sorted(results.keys()):
        value = results[key]
        if value[u'percent'] == 0:
            uprint(u'"{}": "{}",'.format(key, 'kill'))
        elif value[u'percent'] < 10:
            # The original emitted malformed lines for this and the
            # following branches ('"{}: ...' missing a quote, and
            # '"{}"="{}"' using '='); normalized to match the first branch
            # so the generated mapping is syntactically consistent.
            uprint(u'"{}": "{}",'.format(key, 'tbd'))
        elif value[u'type'] == 'comma-delimited list':
            uprint(u'"{}": "{}",'.format(key, 'external_table'))
        else:
            uprint(u'"{}": "{}",'.format(key, value[u'type']))
    uprint(u'}')
def print_values(filename, results):
    """Dump every collected example value for a hand-picked set of fields."""
    print_header('{} values'.format(filename))
    # NOTE(review): the opening '{' is never matched by a closing
    # uprint(u'}') -- the emitted block is unbalanced.
    uprint(u'{')
    for key in sorted(results.keys()):
        # Only these budget/recruitment-style fields are of interest here.
        if key in [
                u'average_annual_budget',
                u'geographical_scope',
                u'other_methods_of_recruitment',
                u'targeted_audience',
                u'method_of_communication_with_audience'
        ]:
            value = results[key]
            print key
            for example in value[u'examples']:
                print '\t', example
def print_sql(filename, results):
    """Stub: intended to emit a SQL schema for *filename* (never implemented)."""
    print_header(u'{} schema'.format(filename))
    # Select the hand-maintained column mapping for this input file.
    curr_map = {
        'cases.json': case_map,
        'methods.json': method_map,
        'organizations.json': organization_map
    }[filename]
    for key in sorted(results.keys()):
        # TODO: body never written; the loop is a placeholder.
        pass
def print_list_usage(filename, results):
    """Summarize how list-valued and boolean fields are actually used."""
    print_header(u'{} lists'.format(filename))
    for key in sorted(results.keys()):
        value = results[key]
        if value[u'type'] == u'comma-delimited list':
            arrv = sorted(value[u'array_values'])
            arrv_len = len(arrv)
            count = {}
            uprint('{}:'.format(key))
            # Tally identical value-combinations; each entry of arrv was
            # sorted when collected, so tallying is order-insensitive.
            for v in arrv:
                arrkey = u', '.join(v)
                count[arrkey] = count.get(arrkey, 0) + 1
            for ckey in count:
                percent = count[ckey] * 100 / arrv_len
                uprint(u'\t{} used {}% ({} times)'.format(ckey, percent, count[ckey]))
        elif value[u'type'] == u'boolean':
            # print value[u'array_values']
            truth = len([x for x in value[u'array_values'] if x == 'Yes'])
            uprint('{} is "Yes" {}% of the time ({} of {})'.format(key, truth * 100 / len(value[u'array_values']), truth, len(value[u'array_values'])))
def get_fieldnames(objs):
    """Return the sorted union of keys appearing across all dicts in *objs*."""
    fieldnames = set()
    for obj in objs:
        # .keys() (rather than the Python-2-only .iterkeys()) behaves
        # identically here and keeps the helper portable to Python 3.
        fieldnames.update(obj.keys())
    return sorted(list(fieldnames))
def encode_obj(obj):
    """Prepare *obj* in place for csv.DictWriter: None becomes '', text is
    UTF-8 encoded; numeric/boolean values pass through. Returns *obj* for
    convenient inline use.
    """
    for key, value in obj.iteritems():
        if value == None:
            # Empty cell; the subsequent encode of '' below is a harmless no-op.
            obj[key] = ''
        if type(value) in [int, float, bool]:
            pass
        else:
            obj[key] = obj[key].encode('utf-8')
    return obj
def print_csv(name):
    """Write the accumulated rows for table *name* to migrations/<name>.csv.

    Also queues the matching psql \\COPY command via add_copy_command.
    """
    objs = tables[name]
    filename = 'migrations/{}.csv'.format(name)
    with open(filename, 'w') as csvfile:
        fieldnames = get_fieldnames(objs)
        # extrasaction='ignore': rows may carry keys outside the header union
        # gathered above only if mutated later; ignore rather than raise.
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        for obj in objs:
            writer.writerow(encode_obj(obj))
    add_copy_command(name, fieldnames)
def add_copy_command(name, fieldnames):
    """Queue a psql \\COPY statement loading migrations/<name>.csv into *name*."""
    command = "\\COPY {0} ({1}) FROM 'migrations/{0}.csv' WITH CSV HEADER".format(
        name, ','.join(fieldnames))
    copy_commands.append(command)
def print_key_usage(filename, objs):
    """Report, per key, the observed value types and value frequencies."""
    print_header(u'{} value types for keys of {} records'.format(filename, len(objs)))
    keys = get_fieldnames(objs)
    usage = {}   # key -> Counter of guessed type names
    values = {}  # key -> Counter of interpreted values
    for key in keys:
        usage[key] = Counter()
        values[key] = Counter()
        for obj in objs:
            apparent_type = guess_type(key, obj.get(key, None))
            working_type = known_type(key)
            usage[key][apparent_type] += 1
            value = interpreted_value(obj, key)
            if value and working_type == u'list':
                if not type(value) == list:
                    print >> sys.stderr, u'{}: {} is not a list'.format(key, value)
                values[key].update(value) # update counts for every item in list
            elif value and working_type == u'json_dict':
                # Dicts are unhashable; count them under a placeholder.
                values[key][u'<dict>'] += 1
            else:
                values[key][value] += 1
    for key in keys:
        # skip these, keeping them anyway
        if key in (u'id', u'body_en', u'title_en', u'file_attachments', u'lead_image', u'other_images', 'author_uid',
                   'path', 'post_date', 'updated_date', 'video', 'stages'):
            continue
        if key.startswith('geo_'):
            continue
        uprint('\n')
        uprint(key)
        uprint(u'\ttypes as found in data')
        for name, count in usage[key].most_common():
            uprint(u'\t\t{}: {}'.format(name, count))
        # Don't bother listing out these values, not helpful
        if key in ('end_date', 'end_date_not_applicable', 'start_date', 'who_was_primarily_responsible_for_organizing_the_initiative'):
            continue
        uprint(u'\tnon-unique values')
        for name, count in values[key].most_common():
            if count > 1:
                uprint(u'\t\t{}: {}'.format(name, count))
        uprint(u'\tunique values')
        for name, count in values[key].most_common():
            if count == 1:
                uprint(u'\t\t{}'.format(name))
# In-memory staging area: table name -> list of row dicts, later flushed
# to CSV files by print_csv().
tables = {}
# Source dumps from the Drupal v2.1 export.
tables['videos'] = videos = jsonify('v2.1/videos.json')
tables['users'] = users = jsonify('v2.1/users.json')
tables['geolocation'] = geolocation_table = []
# Cases plus their satellite (one-to-many) tables.
tables['cases'] = cases_table = []
tables['case__localized_texts'] = case__localized_texts_table = []
tables['case__authors'] = case__authors_table = []
tables['case__attachments'] = case__attachments_table = []
tables['case__tags'] = case__tags_table = []
tables['case__videos'] = case__videos_table = []
tables['case__methods'] = case__methods_table = []
# Methods plus satellites.
tables['methods'] = methods_table = []
tables['method__localized_texts'] = method__localized_texts_table = []
tables['method__authors'] = method__authors_table = []
tables['method__attachments'] = method__attachments_table = []
tables['method__tags'] = method__tags_table = []
tables['method__videos'] = method__videos_table = []
# Organizations plus satellites.
tables['organizations'] = organizations_table = []
tables['organization__localized_texts'] = organization__localized_texts_table = []
tables['organization__authors'] = organization__authors_table = []
tables['organization__attachments'] = organization__attachments_table = []
tables['organization__tags'] = organization__tags_table = []
tables['organization__videos'] = organization__videos_table = []
# psql \COPY statements accumulated by add_copy_command().
copy_commands = []
# Monotonic id source for geolocation rows.
geo_count = 0
def geolocation_to_entity(obj):
global geo_count
geo_count += 1
return {
'id': geo_count,
'name': obj.get('name', ''),
'address1': obj.get('geo_street', ''),
'address2': obj.get('additional', ''),
'city': obj.get('geo_city', ''),
'province': obj.get('geo_province', ''),
'country': obj.get('geo_country', ''),
'postal_code': obj.get('geo_postal_code', ''),
'latitude': obj.get('geo_latitude', ''),
'longitude': obj.get('geo_longitude', '')
}
def attachment_to_entity(attachment, filetype, ref_id, ref='case_id', is_lead=False):
    """Normalize an attachment (dict or bare URL string) into a table row.

    *ref*/*ref_id* name the foreign-key column and its value; *is_lead*
    marks the lead image of a record; *filetype* is the fallback type.
    """
    if type(attachment) == unicode:
        # Bare URL: wrap it so the dict handling below applies uniformly.
        url = attachment
        attachment = {
            'src': url
        }
    entity = {
        'url': attachment.get('src', '') or attachment.get('path'),
        'title': attachment.get('title', ''),
        'type': attachment.get('type', filetype),
        'size': attachment.get('length', ''),
        'is_lead': is_lead
    }
    entity[ref] = ref_id
    return entity
def case_to_entity(case):
    """Flatten one raw case record into a row for the `cases` table.

    Side effects: appends related rows to the geolocation, localized-text,
    author, attachment, tag and video accumulator tables as it goes.
    """
    location = geolocation_to_entity(case)
    geolocation_table.append(location)
    # Only the English localized text is migrated here.
    case__localized_texts_table.append({
        'body': interpreted_value(case, 'body_en'),
        'title': interpreted_value(case, 'title_en'),
        'language': u'en',
        'case_id': case['id']
    })
    case__authors_table.append({
        'author': case['author_uid'],
        'timestamp': interpreted_value(case, 'updated_date'),
        'case_id': case['id']
    })
    # handle all three attachment fields
    image = interpreted_value(case, 'lead_image')
    if image:
        case__attachments_table.append(attachment_to_entity(image, 'image', case['id'], 'case_id', True))
    for image in interpreted_value(case, 'other_images') or []:
        case__attachments_table.append(attachment_to_entity(image, 'image', case['id'], 'case_id',))
    for attachment in interpreted_value(case, 'file_attachments') or []:
        case__attachments_table.append(attachment_to_entity(attachment, 'file', case['id'], 'case_id',))
    for tag in interpreted_value(case, 'tags') or []:
        case__tags_table.append({'tag': tag, 'case_id': case['id']})
    # Videos are stored as ids; resolve each against the global `videos` list.
    for vid in interpreted_value(case, 'video') or []:
        video = get_by_id(videos, vid)
        case__videos_table.append({'url': video['url'], 'title': video['title'], 'case_id': case['id']})
    # The flat row for the `cases` table itself; multi-valued source fields
    # are collapsed via only_one()/choice() (semantics defined elsewhere).
    return {
        'id': case['id'],
        'original_language': choice(interpreted_language_value(case, 'content_language')),
        'issue': only_one(case, 'issue'),
        'communication_mode': only_one(case, 'communication_mode'),
        'communication_with_audience': only_one(case, 'method_of_communication_with_audience'),
        'content_country': only_one(case, 'content_country'),
        'decision_method': only_one(case, 'decision_methods'),
        'end_date': interpreted_value(case, 'end_date'),
        'facetoface_online_or_both': interpreted_value(case, 'facetoface_online_or_both'),
        'facilitated': interpreted_value(case, 'facilitated'),
        'location': location['id'],
        'voting': interpreted_value(case, 'if_voting'),
        'number_of_meeting_days': interpreted_value(case, 'number_of_meeting_days'),
        'ongoing': interpreted_value(case, 'ongoing'),
        'post_date': interpreted_value(case, 'post_date'),
        'published': interpreted_value(case, 'published'),
        'start_date': interpreted_value(case, 'start_date'),
        'total_number_of_participants': interpreted_value(case, 'total_number_of_participants'),
        'updated_date': interpreted_value(case, 'updated_date'),
        'targeted_participant_demographic': only_one(case, 'targeted_participants_demographics'),
        'kind_of_influence': only_one(case, 'kind_of_influence'),
        'targeted_participants_public_role': only_one(case, 'targeted_participants_public_roles'),
        'targeted_audience': only_one(case, 'targeted_audience'),
        'participant_selection': only_one(case, 'participant_selection'),
        'specific_topic': only_one(case, 'specific_topics'),
        'staff_type': only_one(case, 'staff_type'),
        'type_of_funding_entity': only_one(case, 'type_of_funding_entity'),
        'typical_implementing_entity': only_one(case, 'typical_implementing_entity'),
        'typical_sponsoring_entity': only_one(case, 'typical_sponsoring_entity'),
        'who_else_supported_the_initiative': only_one(case, 'who_else_supported_the_initiative'),
        'who_was_primarily_responsible_for_organizing_the_initiative': interpreted_value(case, 'who_was_primarily_responsible_for_organizing_the_initiative')
    }
def method_to_entity(method):
    """Flatten one raw method record into a row for the `methods` table.

    Side effects: appends related rows to the localized-text, author,
    attachment, tag and video accumulator tables.  Unlike cases, methods
    have no geolocation.
    """
    method__localized_texts_table.append({
        'body': interpreted_value(method, 'body_en'),
        'title': interpreted_value(method, 'title_en'),
        'language': u'en',
        'method_id': method['id']
    })
    # NOTE(review): methods use 'author_id' while cases/organizations use
    # 'author_uid' -- confirm this matches the source JSON schemas.
    method__authors_table.append({
        'author': method['author_id'],
        'timestamp': interpreted_value(method, 'updated_date'),
        'method_id': method['id']
    })
    # handle all three attachment fields
    image = interpreted_value(method, 'lead_image')
    if image:
        method__attachments_table.append(attachment_to_entity(image, 'image', method['id'], 'method_id', True))
    for image in interpreted_value(method, 'other_images') or []:
        method__attachments_table.append(attachment_to_entity(image, 'image', method['id'], 'method_id'))
    for attachment in interpreted_value(method, 'file_attachments') or []:
        method__attachments_table.append(attachment_to_entity(attachment, 'file', method['id'], 'method_id'))
    # handle tags
    for tag in interpreted_value(method, 'tags') or []:
        method__tags_table.append({'tag': tag, 'method_id': method['id']})
    # handle videos (always linked externally, never uploaded)
    for vid in interpreted_value(method, 'video') or []:
        video = get_by_id(videos, vid)
        method__videos_table.append({'url': video['url'], 'title': video['title'], 'method_id': method['id']})
    # The flat row for the `methods` table itself.
    return {
        'id': method['id'],
        'original_language': choice(interpreted_language_value(method, 'content_language')),
        'best_for': only_one(method, 'best_for'),
        'communication_mode': only_one(method, 'communication_mode'),
        'decision_method': only_one(method, 'decision_methods'),
        'facilitated': interpreted_value(method, 'facilitated'),
        'governance_contribution': only_one(method, 'governance_contributions'),
        'issue_interdependency': interpreted_value(method, 'issue_interdependency'),
        'issue_polarization': interpreted_value(method, 'issue_polarization'),
        'issue_technical_complexity': interpreted_value(method, 'issue_technical_complexity'),
        'kind_of_influence': only_one(method, 'kind_of_influence'),
        'method_of_interaction': only_one(method, 'methods_of_interaction'),
        'public_interaction_method': only_one(method, 'public_interaction_methods'),
        'post_date': interpreted_value(method, 'post_date'),
        'published': interpreted_value(method, 'published'),
        'typical_funding_source': only_one(method, 'typical_funding_source'),
        'typical_sponsoring_entity': only_one(method, 'typical_sponsoring_entity'),
        'updated_date': interpreted_value(method, 'updated_date')
    }
def organization_to_entity(organization):
    """Flatten one raw organization record into a row for `organizations`.

    Side effects: appends related rows to the geolocation, localized-text,
    author, attachment, tag and video accumulator tables.
    """
    location = geolocation_to_entity(organization)
    geolocation_table.append(location)
    organization__localized_texts_table.append({
        'body': interpreted_value(organization, 'body_en'),
        'title': interpreted_value(organization, 'title_en'),
        'language': u'en',
        'organization_id': organization['id']
    })
    organization__authors_table.append({
        'author': organization['author_uid'],
        'timestamp': interpreted_value(organization, 'updated_date'),
        'organization_id': organization['id']
    })
    # handle all three attachment fields
    image = interpreted_value(organization, 'lead_image')
    if image:
        organization__attachments_table.append(attachment_to_entity(image, 'image', organization['id'], 'organization_id', True))
    for image in interpreted_value(organization, 'other_images') or []:
        organization__attachments_table.append(attachment_to_entity(image, 'image', organization['id'], 'organization_id'))
    for attachment in interpreted_value(organization, 'file_attachments') or []:
        organization__attachments_table.append(attachment_to_entity(attachment, 'file', organization['id'], 'organization_id'))
    for tag in interpreted_value(organization, 'tags') or []:
        organization__tags_table.append({'tag': tag, 'organization_id': organization['id']})
    # Videos are stored as ids; resolve each against the global `videos` list.
    for vid in interpreted_value(organization, 'video') or []:
        video = get_by_id(videos, vid)
        organization__videos_table.append({'url': video['url'], 'title': video['title'], 'organization_id': organization['id']})
    # The flat row for the `organizations` table itself.  Note the direct
    # indexing of 'executive_director' (KeyError if absent) vs. the .get()
    # with default for 'sector'.
    return {
        'id': organization['id'],
        'original_language': choice(interpreted_language_value(organization, 'content_language')),
        'executive_director': organization['executive_director'],
        'issue': only_one(organization, 'issue'),
        'location': location['id'],
        'post_date': interpreted_value(organization, 'post_date'),
        'sector': organization.get('sector', ''),
        'published': interpreted_value(organization, 'published'),
        'updated_date': interpreted_value(organization, 'updated_date')
    }
def fail_loudly(table):
    """Abort if any row still contains a nested list/dict value.

    CSV cells must be scalar; a nested structure here means a source field
    was not flattened by its *_to_entity converter, so crash immediately
    rather than emit a corrupt CSV.

    Raises:
        Exception: on the first offending key (after printing it to stderr).
    """
    for row in table:
        for key, val in row.items():
            # isinstance instead of `type(val) in (list, dict)` so subclasses
            # are caught too.  write() is the py2/py3-portable equivalent of
            # the old `print >> sys.stderr`.
            if isinstance(val, (list, dict)):
                sys.stderr.write('Error: {} is a {}: {}\n'.format(key, type(val), val))
                raise Exception('unflattened value for key {!r}'.format(key))
def write_case_table_data():
    """Convert every case record and emit the case-related CSV files."""
    global type_map
    type_map = case_map
    for record in jsonify('v2.1/cases.json'):
        cases_table.append(case_to_entity(record))
    fail_loudly(cases_table)
    # Emit the main table plus every case child table.
    for table_name in ('cases', 'case__localized_texts', 'case__authors',
                       'case__attachments', 'case__tags', 'case__videos'):
        print_csv(table_name)
def write_method_table_data():
    """Convert every method record and emit the method-related CSV files."""
    global type_map
    type_map = method_map
    for record in jsonify('v2.1/methods.json'):
        methods_table.append(method_to_entity(record))
    fail_loudly(methods_table)
    # Emit the main table plus every method child table.
    for table_name in ('methods', 'method__localized_texts', 'method__authors',
                       'method__attachments', 'method__tags', 'method__videos'):
        print_csv(table_name)
def write_organization_table_data():
    """Convert every organization record and emit its CSV files."""
    global type_map
    type_map = organization_map
    for record in jsonify('v2.1/organizations.json'):
        organizations_table.append(organization_to_entity(record))
    fail_loudly(organizations_table)
    # Emit the main table plus every organization child table.
    for table_name in ('organizations', 'organization__localized_texts',
                       'organization__authors', 'organization__attachments',
                       'organization__tags', 'organization__videos'):
        print_csv(table_name)
if __name__ == '__main__':
    # Dump every table to CSV, then flush the accumulated COPY commands.
    # (A large block of commented-out exploratory stats/printing calls that
    # used to live here has been removed.)
    print_csv('users')
    write_case_table_data()
    write_method_table_data()
    write_organization_table_data()
    print_csv('geolocation')
    # NOTE(review): 'copy_commmands.sql' is spelled with a triple 'm' --
    # confirm downstream consumers expect this exact filename before renaming.
    with open('migrations/copy_commmands.sql', 'w') as ccfile:
        for cmd in copy_commands:
            ccfile.write('%s\n' % (cmd,))
| {
"content_hash": "f15fe6da207dc65c81700af389f05ca1",
"timestamp": "",
"source": "github",
"line_count": 729,
"max_line_length": 157,
"avg_line_length": 40.489711934156375,
"alnum_prop": 0.6011789816038215,
"repo_name": "participedia/data-transport",
"id": "0e16e6e2973dddb46471f434f50fdb0bafe1387e",
"size": "29561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "json_stats.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "19689"
},
{
"name": "Python",
"bytes": "52520"
},
{
"name": "Shell",
"bytes": "51740"
}
],
"symlink_target": ""
} |
from livestreamer.compat import urlparse
from livestreamer.exceptions import PluginError, NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.stream import RTMPStream
from livestreamer.utils import urlget
import re
class Mips(Plugin):
    """Livestreamer plugin for live streams hosted on mips.tv.

    Resolves the channel's RTMP URL by scraping the embed-player page for
    the stream id/token and asking the load balancer for an edge server.
    """

    SWFURL = "http://mips.tv/content/scripts/eplayer.swf"
    PlayerURL = "http://mips.tv/embedplayer/{0}/1/500/400"
    BalancerURL = "http://www.mips.tv:1935/loadbalancer"

    @classmethod
    def can_handle_url(cls, url):
        # Fix: the classmethod receiver was named `self`; it receives the
        # class object, so it must be `cls`.
        return "mips.tv" in url

    def _get_streams(self):
        """Return {'live': RTMPStream} for this channel.

        Raises:
            NoStreamsError: if the player page has no FlashVars id/token.
            PluginError: if the load balancer gives no redirect target.
        """
        # Channel name is the last path component of the page URL.
        channelname = urlparse(self.url).path.rstrip("/").rpartition("/")[-1].lower()

        self.logger.debug("Fetching stream info")
        headers = {
            "Referer": self.url
        }

        res = urlget(self.PlayerURL.format(channelname), headers=headers)
        # Raw strings so \d is a regex class, not an (invalid) string escape.
        match = re.search(r"'FlashVars', '(id=\d+)&s=(.+?)&", res.text)
        if not match:
            raise NoStreamsError(self.url)

        # The RTMP playpath is "<token>?id=<id>".
        channelname = "{0}?{1}".format(match.group(2), match.group(1))

        res = urlget(self.BalancerURL, headers=headers)
        match = re.search(r"redirect=(.+)", res.text)
        if not match:
            raise PluginError("Error retrieving RTMP address from loadbalancer")
        rtmp = match.group(1)

        streams = {}
        streams["live"] = RTMPStream(self.session, {
            "rtmp": "rtmp://{0}/live/{1}".format(rtmp, channelname),
            "pageUrl": self.url,
            "swfVfy": self.SWFURL,
            "conn": "S:OK",
            "live": True
        })

        return streams


__plugin__ = Mips
| {
"content_hash": "8e20456c8a605d26c059a97e38fbdfe3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 85,
"avg_line_length": 30.596153846153847,
"alnum_prop": 0.6071653048397234,
"repo_name": "breunigs/livestreamer",
"id": "dffaab99f9d2215d3e40b35f73b965260d1e4b10",
"size": "1591",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/livestreamer/plugins/mips.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "445849"
},
{
"name": "Shell",
"bytes": "1078"
}
],
"symlink_target": ""
} |
def main():
    """Print the greeting (kept byte-identical to the original one-liner)."""
    print("Hello World. \n Welcome to Python!")


main()
| {
"content_hash": "333723c170efa082ea03852ca87c965d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 44,
"alnum_prop": 0.7045454545454546,
"repo_name": "chrlttv/Teaching",
"id": "ea6cdae8fa497f25b21371a920dc956486740acd",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Session1/scripts/run_hello_world.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "332760"
},
{
"name": "Python",
"bytes": "44"
}
],
"symlink_target": ""
} |
import argparse
import datetime
from pathlib import Path
import time
import config
from logger import LOGGER
from marion_county import scrape_geojson
from crime import acquire_crime_data_as_json
def validate_date(input_date: str) -> datetime.date:
    """Parse a ``YYYY-MM-DD`` string into a ``datetime.date``.

    Args:
        input_date: Date string in ``%Y-%m-%d`` form.

    Returns:
        The parsed date object.

    Raises:
        argparse.ArgumentTypeError: If the string is not a valid date, so
            argparse reports a clean CLI error.
    """
    try:
        parsed = datetime.datetime.strptime(input_date, "%Y-%m-%d")
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Not a valid date: '{0}'.".format(input_date))
    return parsed.date()
def main():
    """Main access point for application.

    Parses CLI args, pulls crime data for the given date range, then scrapes
    every configured GeoJSON layer into --save_directory (created if absent).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--start_date', required=True, help='Start Date for Crime Pull', type=validate_date)
    parser.add_argument('--end_date', required=True, help='End Date for Crime Pull', type=validate_date)
    parser.add_argument('--save_directory', required=True, help='Where to save all of this', type=Path)
    args = parser.parse_args()

    start = datetime.datetime.now()

    save_directory = args.save_directory

    if not save_directory.exists():
        # Lazy %-style args let logging skip the formatting when suppressed.
        LOGGER.warning('%s does not exist. Creating directory', save_directory.as_posix())
        save_directory.mkdir()

    acquire_crime_data_as_json(args.start_date, args.end_date, save_directory / 'crime.json')

    for key, values in config.INDYGIS_ARC_MAP.items():
        LOGGER.info('Acquiring %s geojson...', values['name'])
        file_name = values['name'] + '.geojson'
        try:
            scrape_geojson(save_directory / file_name, key)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.  Keep the best-effort behaviour but record
            # the traceback.
            LOGGER.exception('Failed to acquire %s', values['name'])
        else:
            # Fix: 'Complete' used to be logged unconditionally, even when
            # the scrape had just failed.
            LOGGER.info('Complete')
        # Throttle between layer requests.
        time.sleep(10)

    end = datetime.datetime.now()
    LOGGER.info('Data grab complete')
    # Fix: `.seconds` ignores the days component of a timedelta;
    # total_seconds() is correct for runs of any length.
    LOGGER.info('Total time: %f minutes', (end - start).total_seconds() / 60)


if __name__ == '__main__':
    main()
| {
"content_hash": "03d0a28195c096a27206a225c4b35128",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 108,
"avg_line_length": 29.391304347826086,
"alnum_prop": 0.6503944773175543,
"repo_name": "awburgess/irvington_data",
"id": "ba64bb1fe4688892aa9bb9a34e7637b314df11fd",
"size": "2244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_grabber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18828"
}
],
"symlink_target": ""
} |
import unittest, json, mock
from mock import patch
from app import payments
from app.db.interface import PaymentService, PaymentNotFoundError
from flask_api import status # HTTP Status Codes
from flask import make_response,jsonify
from app.error_handlers import DataValidationError
from mock import patch
# ---------------------------------------------------------------------------
# Shared request/response fixtures.  The *_DETAIL dicts are embedded (by
# reference, not copy) in the payment dicts below.
# ---------------------------------------------------------------------------
CC_DETAIL = {'user_name' : 'Jimmy Jones', 'card_number' : '1111222233334444',
             'expires' : '01/2019', 'card_type' : 'Mastercard'}
DC_DETAIL = {'user_name' : 'Jeremy Jenkins', 'card_number' : '4444333322221111',
             'expires' : '02/2020', 'card_type' : 'Visa'}
PP_DETAIL = {'user_name' : 'John Jameson', 'user_email' : 'jj@aol.com'}
CREDIT = {'nickname' : 'my credit', 'user_id' : 1, 'payment_type' : 'credit',
          'details' : CC_DETAIL}
DEBIT = {'nickname' : 'my debit', 'user_id' : 1, 'payment_type' : 'debit',
         'details' : DC_DETAIL}
PAYPAL = {'nickname' : 'my paypal', 'user_id' : 1, 'payment_type' : 'paypal',
          'details' : PP_DETAIL}
# for put updates
PUT_CREDIT = {'nickname' : 'favcredit', 'user_id' : 1, 'payment_type' : 'credit',
              'details' : CC_DETAIL}
PUT_CREDIT_RETURN = {'nickname' : 'favcredit', 'user_id' : 1, 'payment_type' : 'credit',
                     'details' : CC_DETAIL, 'is_default' : False, 'charge_history' : 0.0, 'payment_id' : 1}
# for patch updates
PATCH_CREDIT = { 'nickname' : 'boringcredit'}
PATCH_RETURN = {'nickname' : 'boringcredit', 'user_id' : 1, 'payment_type' : 'credit',
                'details' : CC_DETAIL, 'is_default' : False, 'charge_history' : 0.0, 'payment_id' : 1}
#note 'nickname' is spelled wrong
BAD_DATA = {'bad key' : 'my paypal', 'user_id' : 2, 'payment_type' : 'paypal',
            'details' : PP_DETAIL}
BAD_DATA2 = {"nicknam3" : "my paypal", "user_id" : 1, "payment_type" : "paypal",
             "details" : {"user_name" : "John Jameson", "user_email" : "jj@aol.com"}}
PP_RETURN = dict(PAYPAL, is_default=False, charge_history=0.0, payment_id=3)
# NOTE(review): dict(PAYPAL, ...) is a SHALLOW copy, so PP_RETURN['details']
# is the same object as PP_DETAIL / PAYPAL['details'].  The next line
# therefore also adds 'is_linked' to PAYPAL and BAD_DATA -- confirm the
# tests below intentionally rely on this aliasing before changing it.
PP_RETURN['details']['is_linked'] = True
CC_RETURN = dict(CREDIT, is_default=False, charge_history=0.0, payment_id=1)
DC_RETURN = dict(DEBIT, is_default=False, charge_history=0.0, payment_id=2)
# Canned service-layer responses used by the GET/list tests.
SAMPLE_PAYMENT = {
    'id': 0,
    'nickname': 'my credit',
    'user_id': 1,
    'payment_type': 'credit',
    'details':
    {
        'user_name': 'Jimmy Jones',
        'card_number': '1111222233334444',
        'expires': '01/2019',
        'card_type': 'Mastercard'
    }
}
SAMPLE_PAYMENTS = [
    {
        'id': 0,
        'nickname': 'credit_one',
        'user_id': 1,
        'payment_type': 'credit',
        'details':
        {
            'user_name': 'Tommy Stones',
            'card_number': '123456789000',
            'expires': '02/2020',
            'card_type': 'Mastercard'
        }
    },
    {
        'id': 1,
        'nickname': 'credit_two',
        'user_id': 1,
        'payment_type': 'paypal',
        'details':
        {
            'user_name': 'Tommy Stones',
            'user_email': 'tommy@stones.abc',
            'is_linked': True
        }
    },
    {
        'id': 2,
        'nickname': 'amex',
        'user_id': 2,
        'payment_type': 'credit',
        'details':
        {
            'user_name': 'Jillian Jasper',
            'card_number': '0101010101010101',
            'expires': '12/2020',
            'card_type': 'American Express'
        }
    }
]
class TestPaymentsCRUD(unittest.TestCase):
    """
    Test cases for CRUD methods contained in payments.py.

    All persistence is mocked out at the PaymentService boundary, so these
    tests exercise only the Flask route/serialization layer.
    """
    def setUp(self):
        # Important! Need to use the test_client method in order to test the flask-made routes
        self.app = payments.app.test_client()
def test_get_payments_ok(self):
# return 200 OK and a simple payload for a successful request
id = 0
with patch.object(PaymentService, 'get_payments', return_value=SAMPLE_PAYMENT) as mocked_service:
response = self.app.get('/payments/{}'.format(id))
# when doing the assertion methods on a mocked object, make *very* sure that the method
# is one of the actual methods; otherwise the assertion will be meaningless
mocked_service.assert_called_once_with(payment_ids=[id])
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(json.loads(response.data), SAMPLE_PAYMENT)
def test_get_payments_not_found(self):
# something goes wrong with the GET request and the resource cannot be found
id = 0
with patch.object(PaymentService, 'get_payments', side_effect=Exception) as mocked_service:
error_response = payments.NOT_FOUND_ERROR_BODY
error_response['error'] = error_response['error'].format(id)
response = self.app.get('/payments/{}'.format(id))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(json.loads(response.data), error_response)
    # --- GET /payments (list) ---------------------------------------------
    def test_list_payments_all(self):
        # ensure that all payments are returned
        with patch.object(PaymentService, 'get_payments', return_value=SAMPLE_PAYMENTS) as mocked_service:
            response = self.app.get('/payments')
            mocked_service.assert_called_once_with()
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(json.loads(response.data), SAMPLE_PAYMENTS)
    def test_list_payments_by_ids(self):
        # return payments that have specific ids
        ids = [1,2]
        # the final query string will look like: ?ids=1&ids=2
        # flask will know how to deal with a query param appearing multiple times
        ids_query_string = 'ids={}&ids={}'.format(ids[0], ids[1])
        payments_to_return = SAMPLE_PAYMENTS[1:3]
        with patch.object(PaymentService, 'get_payments', return_value=payments_to_return) as mocked_service:
            response = self.app.get('/payments?{}'.format(ids_query_string))
            mocked_service.assert_called_once_with(payment_ids=ids)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(json.loads(response.data), payments_to_return)
    def test_list_payments_by_attribute(self):
        # return payments that have a specific attribute
        specific_attribute = 'payment_type'
        specific_attribute_value = 'paypal'
        attribute_params = {'payment_type': 'paypal'}
        paypal_payment = SAMPLE_PAYMENTS[1]
        with patch.object(PaymentService, 'get_payments', return_value=paypal_payment) as mocked_service:
            response = self.app.get('/payments?{}={}'.format(specific_attribute, specific_attribute_value))
            mocked_service.assert_called_once_with(payment_attributes=attribute_params)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(json.loads(response.data), paypal_payment)
    def test_list_payments_not_found(self):
        # attempt to retrieve payments and catch the exception raised; return 404
        with patch.object(PaymentService, 'get_payments', side_effect=Exception) as mocked_service:
            response = self.app.get('/payments')
            self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
            self.assertEqual(json.loads(response.data), payments.GENERAL_NOT_FOUND_ERROR)
    def test_list_payments_with_ids_and_other_params(self):
        # test to make sure that the ids parameter takes priority over other parameters
        ids = [1,2]
        ids_query_string = 'ids={}&ids={}'.format(ids[0], ids[1])
        payments_to_return = SAMPLE_PAYMENTS[1:3]
        other_param = 'nickname'
        other_param_value = 'amex'
        with patch.object(PaymentService, 'get_payments', return_value=payments_to_return) as mocked_service:
            query_string = '{}&{}={}'.format(ids_query_string, other_param, other_param_value)
            response = self.app.get('/payments?{}'.format(query_string))
            # important - we should call the get_payments method with payment_ids, *not* payment_attributes
            mocked_service.assert_called_once_with(payment_ids=ids)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(json.loads(response.data), payments_to_return)
    # --- POST /payments (create) ------------------------------------------
    # NOTE(review): assertions like `self.assertTrue("error" in resp.data)`
    # assume `resp.data` is a (Python 2) str; under Python 3 resp.data is
    # bytes and `"error" in resp.data` raises TypeError -- revisit on port.
    @mock.patch.object(PaymentService, 'add_payment', return_value=CC_RETURN)
    def test_crud_create_card(self, mock_ps_add):
        data = json.dumps(CREDIT)
        resp = self.app.post('/payments', data=data, content_type='application/json')
        mock_ps_add.assert_called_with(CREDIT)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(json.loads(resp.data), {'created' : CC_RETURN})
    @mock.patch.object(PaymentService, 'add_payment', return_value=PP_RETURN)
    def test_crud_create_paypal(self, mock_ps_add):
        data = json.dumps(PAYPAL)
        resp = self.app.post('/payments', data=data, content_type='application/json')
        mock_ps_add.assert_called_with(PAYPAL)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(json.loads(resp.data), {'created' : PP_RETURN})
    @mock.patch.object(PaymentService, 'add_payment')
    def test_crud_create_two_cards(self, mock_ps_add):
        # Two sequential creates through the same mock, switching the
        # return_value between calls.
        data = json.dumps(CREDIT)
        mock_ps_add.return_value = CC_RETURN
        resp = self.app.post('/payments', data=data, content_type='application/json')
        mock_ps_add.assert_called_with(CREDIT)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(json.loads(resp.data), {'created' : CC_RETURN})
        data = json.dumps(DEBIT)
        mock_ps_add.return_value = DC_RETURN
        resp = self.app.post('/payments', data=data, content_type='application/json')
        mock_ps_add.assert_called_with(DEBIT)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(json.loads(resp.data), {'created' : DC_RETURN})
    @mock.patch.object(PaymentService, 'add_payment', side_effect=DataValidationError)
    def test_crud_create_bad_data_single_quotes(self, mock_ps_add):
        data = json.dumps(BAD_DATA)
        resp = self.app.post('/payments', data=data, content_type='application/json')
        mock_ps_add.assert_called_with(BAD_DATA)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue("error" in resp.data)
    @mock.patch.object(PaymentService, 'add_payment', side_effect=DataValidationError)
    def test_crud_create_bad_data_double_quotes(self, mock_ps_add):
        data = json.dumps(BAD_DATA2)
        resp = self.app.post('/payments', data=data, content_type='application/json')
        mock_ps_add.assert_called_with(BAD_DATA2)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue("error" in resp.data)
    @mock.patch.object(PaymentService, 'add_payment', side_effect=DataValidationError)
    def test_crud_create_garbage(self, mock_ps_add):
        # Non-JSON body: the route ends up passing None to the service
        # (per the assert_called_with below).
        garbage = 'a@$*&@#sdassdc3r 3284723X43&^@!#@*#'
        resp = self.app.post('/payments', data=garbage, content_type='application/json')
        mock_ps_add.assert_called_with(None)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue("error" in resp.data)
#Tests for Deleting
@mock.patch.object(PaymentService, 'remove_payment')
def test_delete_payment_with_valid_id(self, mock_ps_delete):
resp = self.app.delete('/payments/1')
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertTrue('' in resp.data)
@mock.patch.object(PaymentService, 'remove_payment')
def test_delete_payment_with_invalid_id(self, mock_ps_delete):
resp = self.app.delete('/payments/1345345')
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertTrue('' in resp.data)
def test_delete_payment_with_gibberish_id(self):
resp = self.app.delete('/payments/jghjeshg')
#will not go to the payment service so no need to mock
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertTrue('Not Found' in resp.data)
    #Tests for action - set-default
    # NOTE(review): the bare `self.assertRaises(DataValidationError)` /
    # `self.assertRaises(KeyError)` calls below pass only the exception
    # class, which just returns an unused context manager -- they assert
    # nothing as written.
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_set_default_action_with_no_payments_for_user_id(self, mock_db_action):
        user_id = payment_id = 1
        payment_data = {'payment_id': payment_id}
        data = json.dumps(payment_data)
        mock_db_action.side_effect = DataValidationError('Payments not found for the user_id: {}'.format(user_id))
        resp = self.app.patch('payments/{}/set-default'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=payment_data)
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('error' in resp.data)
        self.assertTrue('Payments not found' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action', return_value=True)
    def test_set_default_action_success(self, mock_db_action):
        user_id = payment_id = 1
        payment_data = {'payment_id': payment_id}
        data = json.dumps(payment_data)
        resp = self.app.patch('payments/{}/set-default'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=payment_data)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertTrue('success' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action', return_value=False)
    def test_set_default_action_with_no_default_payment(self, mock_db_action):
        # Service returns False (nothing updated) -> route answers 404.
        user_id = payment_id = 1
        payment_data = {'payment_id': payment_id}
        data = json.dumps(payment_data)
        resp = self.app.patch('payments/{}/set-default'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=payment_data)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertTrue('No Payment' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_set_default_action_with_no_request_data(self, mock_db_action):
        user_id = 1
        resp = self.app.patch('payments/{}/set-default'.format(user_id),data=None, content_type='application/json')
        mock_db_action.assert_not_called()
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('no data' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_set_default_action_with_text_request_data(self, mock_db_action):
        user_id = payment_id = 1
        payment_data = "payment_id"
        resp = self.app.patch('payments/{}/set-default'.format(user_id),data=payment_data, content_type='text/plain')
        mock_db_action.assert_not_called()
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('not json' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_set_default_action_with_wrong_request_data(self, mock_db_action):
        # Well-formed JSON that lacks the required 'payment_id' key.
        user_id = payment_id = 1
        payment_data = {'random_id': payment_id}
        data = json.dumps(payment_data)
        resp = self.app.patch('payments/{}/set-default'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_not_called()
        self.assertRaises(KeyError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('does not have the payment_id' in resp.data)
    #Tests for action - charge
    # NOTE(review): as in the set-default tests, the bare
    # `self.assertRaises(...)` calls below assert nothing as written.
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_no_payments_for_user_id(self, mock_db_action):
        user_id = 1
        amount_data = {'amount' : 25.0 }
        data = json.dumps(amount_data)
        mock_db_action.side_effect = DataValidationError('Payments not found for the user_id: {}'.format(user_id))
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=amount_data)
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('error' in resp.data)
        self.assertTrue('Payments not found' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_no_request_data(self, mock_db_action):
        user_id = 1
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=None, content_type='application/json')
        mock_db_action.assert_not_called()
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('no data' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_text_request_data(self, mock_db_action):
        user_id = 1
        amount_data = "payment_id"
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=amount_data, content_type='text/plain')
        mock_db_action.assert_not_called()
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('not json' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_wrong_request_data(self, mock_db_action):
        # Well-formed JSON that lacks the required 'amount' key.
        user_id = 1
        amount_data = {'charge-amount' : 25.0 }
        data = json.dumps(amount_data)
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_not_called()
        self.assertRaises(KeyError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('does not have the amount' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_negative_amount(self, mock_db_action):
        # Negative amounts are rejected by the route before the service runs.
        user_id = 1
        amount_data = {'amount' : -25.0 }
        data = json.dumps(amount_data)
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_not_called()
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('Order amount is negative.' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action', return_value=True)
    def test_charge_action_success(self, mock_db_action):
        user_id = 1
        amount_data = {'amount' : 25.0 }
        data = json.dumps(amount_data)
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=amount_data)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertTrue('success' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_no_default_payment_for_user(self, mock_db_action):
        user_id = 1
        amount_data = {'amount' : 25.0 }
        data = json.dumps(amount_data)
        mock_db_action.side_effect = DataValidationError('Invalid request: Default Payment for this user_id: {} not found. Please update the default_payment first.'.format(user_id))
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=amount_data)
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('error' in resp.data)
        self.assertTrue('Invalid request: Default Payment' in resp.data)
    @mock.patch.object(PaymentService, 'perform_payment_action')
    def test_charge_action_with_default_payment_card_expired(self, mock_db_action):
        user_id = 1
        amount_data = {'amount' : 25.0 }
        data = json.dumps(amount_data)
        mock_db_action.side_effect = DataValidationError('Invalid request: Default Payment for this user_id: {} (credit) is expired'.format(user_id))
        resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
        mock_db_action.assert_called_once_with(user_id, payment_attributes=amount_data)
        self.assertRaises(DataValidationError)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue('error' in resp.data)
        self.assertTrue('expired' in resp.data)
@mock.patch.object(PaymentService, 'perform_payment_action')
def test_charge_action_with_no_default_payment_paypal_not_linked(self, mock_db_action):
user_id = 1
amount_data = {'amount' : 25.0 }
data = json.dumps(amount_data)
mock_db_action.side_effect = DataValidationError('Invalid request: Default Payment for this user_id: {} (Paypal) is not linked'.format(user_id))
resp = self.app.patch('payments/{}/charge'.format(user_id),data=data, content_type='application/json')
mock_db_action.assert_called_once_with(user_id, payment_attributes=amount_data)
self.assertRaises(DataValidationError)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('error' in resp.data)
self.assertTrue('not linked' in resp.data)
def test_index(self):
resp = self.app.get('/')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertTrue('Payments REST API Service' in resp.data)
#test cases for update payments - put and patch
# passing correct data to put
@mock.patch.object(PaymentService, 'update_payment', return_value=PUT_CREDIT_RETURN)
def test_crud_update_put(self,mock_ps_update):
data = json.dumps(PUT_CREDIT)
resp = self.app.put('/payments/1', data=data, content_type='application/json')
mock_ps_update.assert_called_with(1,payment_replacement=PUT_CREDIT)
self.assertEqual( resp.status_code, status.HTTP_200_OK )
new_json = json.loads(resp.data)
self.assertEqual (new_json['nickname'], 'favcredit')
# passing correct data to patch
@mock.patch.object(PaymentService, 'update_payment', return_value=PATCH_RETURN)
def test_crud_update_patch(self,mock_ps_update):
data = json.dumps(PATCH_CREDIT)
resp = self.app.patch('/payments/1', data=data, content_type='application/json')
mock_ps_update.assert_called_with(1,payment_attributes=PATCH_CREDIT)
self.assertEqual( resp.status_code, status.HTTP_200_OK )
new_json = json.loads(resp.data)
self.assertEqual (new_json['nickname'], 'boringcredit')
# passing text data to put
def test_crud_update_put_with_text_data(self):
resp = self.app.put('/payments/1', data="hello", content_type='text/plain')
self.assertEqual( resp.status_code, status.HTTP_400_BAD_REQUEST )
self.assertTrue('not json' in resp.data)
# passing text data to patch
def test_crud_update_patch_with_text_data(self):
resp = self.app.patch('/payments/1', data="hello", content_type='text/plain')
self.assertEqual( resp.status_code, status.HTTP_400_BAD_REQUEST )
self.assertTrue('not json' in resp.data)
# passing no data to put
def test_crud_update_put_with_no_data(self):
resp = self.app.put('/payments/1', data=None, content_type='application/json')
self.assertEqual( resp.status_code, status.HTTP_400_BAD_REQUEST )
self.assertTrue('no data' in resp.data)
# passing no data to patch
def test_crud_update_patch_with_no_data(self):
resp = self.app.patch('/payments/1', data=None, content_type='application/json')
self.assertEqual( resp.status_code, status.HTTP_400_BAD_REQUEST )
self.assertTrue('no data' in resp.data)
# passing garbage data to put
#@mock.patch.object(PaymentService, 'update_payment',side_effect=DataValidationError)
def test_crud_update_put_garbage(self):
garbage = "a@$*&@#sdassdc3r 3284723X43&^@!#@*#"
resp = self.app.put('/payments/1',data = garbage, content_type='application/json')
self.assertTrue('bad or no data' in resp.data)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# passing garbage data to patch
def test_crud_update_patch_garbage(self):
garbage = "a@$*&@#sdassdc3r 3284723X43&^@!#@*#"
resp = self.app.patch('/payments/1',data = garbage, content_type='application/json')
self.assertTrue('bad or no data' in resp.data)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# passing a non existing payment id to PATCH
@mock.patch.object(PaymentService, 'update_payment', side_effect=PaymentNotFoundError('Invalid payment: Payment ID not found'))
def test_crud_update_id_not_found_patch(self,mock_ps_update):
credit = json.dumps({'nickname' : 'mycredit'})
resp = self.app.patch('payments/778',data = credit,content_type='application/json')
mock_ps_update.assert_called_with(778,payment_attributes={'nickname' : 'mycredit'})
self.assertTrue('Invalid payment: Payment ID not found' in resp.data)
self.assertTrue(resp.status_code, status.HTTP_404_NOT_FOUND)
# passing a non existing payment id to PUT
@mock.patch.object(PaymentService, 'update_payment', side_effect=PaymentNotFoundError('Invalid payment: Payment ID not found'))
def test_crud_update_id_not_found_put(self,mock_ps_update):
credit = json.dumps(CC_RETURN)
resp = self.app.put('payments/778',data = credit,content_type='application/json')
mock_ps_update.assert_called_with(778,payment_replacement=CC_RETURN)
self.assertTrue('Invalid payment: Payment ID not found' in resp.data)
self.assertTrue(resp.status_code, status.HTTP_404_NOT_FOUND)
| {
"content_hash": "db9472cb73d8eb4818c32e04d43351b8",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 181,
"avg_line_length": 50.71072796934866,
"alnum_prop": 0.6608741641796683,
"repo_name": "devops-s17-payments/payments",
"id": "c2f57ffbddf208e5c7f0753053c9e59fa2aeec9e",
"size": "26596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_payments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1676"
},
{
"name": "Gherkin",
"bytes": "5141"
},
{
"name": "HTML",
"bytes": "1889"
},
{
"name": "Python",
"bytes": "115568"
},
{
"name": "Shell",
"bytes": "450"
}
],
"symlink_target": ""
} |
from django.conf.urls import *
from .views import *
# URL routes for the cacao field-guide app ('guias_cacao'), grouped by guide
# (sombra, poda, plaga, piso, cosecha, cierre, saf, vivero, suelo) plus API
# endpoints. Views are referenced by dotted string relative to
# 'guias_cacao.views'.
# NOTE(review): patterns() with string view names was removed in Django 1.10;
# this module targets an older Django — verify before upgrading.
urlpatterns = patterns('guias_cacao.views',
    url(r'^$', 'index_ficha_sombra', name='index-cacao'),
    url(r'^sombra/riqueza/$', 'riqueza_sombra', name='riqueza-cacao'),
    url(r'^sombra/analisis/$', 'analisis_sombra', name='analisis-cacao'),
    url(r'^sombra/cobertura/$', 'cobertura_sombra', name='cobertura-cacao'),
    url(r'^sombra/densidad/$', 'densidad_sombra', name='densidad-cacao'),
    url(r'^sombra/acciones/$', 'acciones_sombra', name='acciones-cacao'),
    url(r'^sombra/caracterizacion/$', 'caracterizacion_sombra', name='caracterizacion-cacao'),
    url(r'^sombra/dominancia/$', 'dominancia_sombra', name='dominancia-cacao'),
    url(r'^sombra/dimensiones/$', 'dimensiones_sombra', name='dimensiones-cacao'),
    #urls de poda
    url(r'^poda/altura/$', 'altura_poda', name='altura-poda'),
    url(r'^poda/ancho-copa/$', 'ancho_poda', name='ancho-poda'),
    url(r'^poda/produccion/$', 'produccion_poda', name='produccion-poda'),
    # NOTE(review): duplicate name 'produccion-poda' — this entry probably
    # meant name='atributos-poda'; reverse() lookups resolve to the entry
    # above, so confirm no caller depends on the duplicate before changing.
    url(r'^poda/atributos/$', 'atributos_poda', name='produccion-poda'),
    url(r'^poda/analisis/$', 'analisis_poda', name='analisis-poda'),
    url(r'^poda/tipos-poda/$', 'tipo_poda', name='tipo-poda'),
    url(r'^poda/acciones/$', 'acciones_poda', name='acciones-poda'),
    #urls de plaga
    url(r'^plaga/historial/$', 'historial_plaga', name='historial-plaga'),
    url(r'^plaga/acciones/$', 'acciones_plaga', name='acciones-plaga'),
    url(r'^plaga/fuente-incidencia/$', 'fuente_incidencia_plaga', name='fuente-incidencia-plaga'),
    url(r'^plaga/produccion/$', 'produccion_rendimiento_plaga', name='produccion-rendimiento-plaga'),
    url(r'^plaga/analisis/$', 'analisis_plaga', name='analisis-plaga'),
    url(r'^plaga/suelo/$', 'observacion_sombra_poda_plaga', name='observacion-sombra-poda-plaga'),
    url(r'^plaga/acciones-manejo/$', 'acciones_manejo_plaga', name='acciones-manejo-plaga'),
    url(r'^plaga/equipos-formacion/$', 'equipos_formacion_plaga', name='equipos-formacion-plaga'),
    #urls de piso
    url(r'^piso/estado/$', 'estado_piso', name='estado-piso'),
    url(r'^piso/manejo/$', 'estado_piso2', name='estado-piso-2'),
    url(r'^piso/orientacion/$', 'orientacion_composicion_piso', name='orientacion-composicion-piso'),
    url(r'^piso/analisis/$', 'analisis_piso', name='analisis-piso'),
    url(r'^piso/suelo/$', 'suelo_piso', name='suelo-piso'),
    url(r'^piso/propuesta/$', 'propuesta_piso', name='propuesta-piso'),
    url(r'^piso/equipo-formacion/$', 'equipo_piso', name='equipo-piso'),
    #urls cosecha
    url(r'^cosecha/conversaciones/$', 'conversacion_cosecha', name='conversacion-cosecha'),
    url(r'^cosecha/mazorcas-sanas/$', 'datos_sanos_cosecha', name='datos-sanos-cosecha'),
    url(r'^cosecha/mazorcas-enfermas/$', 'datos_enfermas_cosecha', name='datos-enfermas-cosecha'),
    url(r'^cosecha/mazorcas-danadas/$', 'datos_danadas_cosecha', name='datos-danadas-cosecha'),
    url(r'^cosecha/analisis/$', 'analisis_cosecha', name='analisis-cosecha'),
    #urls cierre
    url(r'^cierre/sombra/$', 'sombra_cierre', name='sombra-cierre'),
    url(r'^cierre/poda/$', 'poda_cierre', name='poda-cierre'),
    url(r'^cierre/suelo/$', 'suelo_cierre', name='suelo-cierre'),
    url(r'^cierre/plaga/$', 'plaga_cierre', name='plaga-cierre'),
    url(r'^cierre/piso/$', 'piso_cierre', name='piso-cierre'),
    url(r'^cierre/vivero/$', 'vivero_cierre', name='vivero-cierre'),
    url(r'^cierre/cosecha/$', 'cosecha_cierre', name='cosecha-cierre'),
    url(r'^cierre/ciclo-trabajo/$', 'ciclo_trabajo_cierre', name='ciclo-trabajo-cierre'),
    url(r'^cierre/costos/$', 'calculos_costo_cierre', name='calculos-costo-cierre'),
    url(r'^cierre/tablas/$', 'tablas_cierre', name='tablas-cierre'),
    #urls saf
    url(r'^saf/objetivos/$', 'objetivos_saf', name='objetivos-saf'),
    url(r'^saf/clima/$', 'clima_saf', name='clima-saf'),
    url(r'^saf/condiciones/$', 'condiciones_saf', name='condiciones-saf'),
    url(r'^saf/sombra/$', 'sombra_saf', name='sombra-saf'),
    url(r'^saf/semilla/$', 'semilla_saf', name='semilla-saf'),
    url(r'^saf/calidad/$', 'calidad_saf', name='calidad-saf'),
    url(r'^saf/disenio-saf/$', 'disenio_saf_saf', name='disenio-saf'),
    #urls vivero
    url(r'^vivero/conversaciones/$', 'conversaciones_vivero', name='conservaciones-vivero'),
    url(r'^vivero/conversaciones-dos/$', 'conversaciones_dos_vivero', name='conservaciones-dos-vivero'),
    url(r'^vivero/observacion/$', 'observacion_vivero', name='observacion-vivero'),
    url(r'^vivero/analisis/$', 'analisis_vivero', name='analisis-vivero'),
    #urls suelo
    url(r'^suelo/historial/$', 'historial_limitaciones', name='historial-limitantes'),
    url(r'^suelo/erosion/$', 'suelo_erosion', name='suelo-erosion'),
    url(r'^suelo/obras/$', 'suelo_obras', name='suelo-obras'),
    url(r'^suelo/indicador-drenaje/$', 'suelo_indicador_drenaje', name='suelo-indicador-drenaje'),
    url(r'^suelo/obras-drenaje/$', 'suelo_obras_drenaje', name='suelo-obras-drenaje'),
    url(r'^suelo/salud-raices/$', 'suelo_salud_raices', name='suelo-salud-raices'),
    url(r'^suelo/toma-desicion/$', 'suelo_varios', name='suelo-varios'),
    url(r'^suelo/salidas/$', 'suelo_salidas', name='suelo-salidas'),
    #urls apis
    url(r'^api/productor/$', 'get_productor', name='productor-cacao'),
    url(r'^mapacacao/$', 'obtener_lista_mapa_cacao', name='obtener-lista-mapa-cacao'),
    url(r'^colabore/$', 'contact', name='contactar-cacao'),
)
| {
"content_hash": "221bed82075cb0a8f61932c687ca8982",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 104,
"avg_line_length": 60.10989010989011,
"alnum_prop": 0.6616087751371115,
"repo_name": "CARocha/ciat_plataforma",
"id": "30d2824c1239335afa557a92a8e84d5157e6a578",
"size": "5470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guias_cacao/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "471726"
},
{
"name": "HTML",
"bytes": "1860390"
},
{
"name": "JavaScript",
"bytes": "1492281"
},
{
"name": "Python",
"bytes": "3679114"
}
],
"symlink_target": ""
} |
# wsadmin (Jython/Python 2) discovery script: walks every WebSphere cell,
# listing application servers, clusters, and the applications deployed to
# each, then prints the result as a single JSON document on the last line.
# Runs inside wsadmin, which injects AdminConfig/AdminTask/AdminApp globals.
cells = AdminConfig.list('Cell').splitlines()
for cell in cells:
    cname = AdminConfig.showAttribute(cell, 'name')
    cluster_entries = []
    server_entries = []
    # NOTE(review): cluster_apps/server_apps are initialized once per cell
    # but extended inside the per-server/per-cluster loops below, so each
    # emitted entry accumulates the apps of all previously visited
    # servers/clusters too — verify this cumulative listing is intended.
    cluster_apps = []
    server_apps = []
    # Get list of Nodes
    nodes = AdminConfig.list('Node', cell).splitlines()
    for node in nodes:
        nname = AdminConfig.showAttribute(node, 'name')
        print 'Node: ' + nname
        # Get list of servers on node. We only need servers of type APPLICATION
        servers = AdminTask.listServers('[-serverType APPLICATION_SERVER -nodeName %s]' % nname).splitlines()
        for server in servers:
            server_name = AdminConfig.showAttribute(server, "name")
            server_apps += AdminApp.list("WebSphere:cell=%s,node=%s,server=%s" % (cname, nname, server_name)).splitlines()
            print '\tServer: ' + server_name
            print '\t\t', server_apps
            # entry key is "node=server"; value is the JSON list of app names
            server_entries += ['"%s=%s": [ %s ]' % (nname, server_name, ','.join([ '"%s"' % app for app in server_apps]))]
        # NOTE(review): clusters are listed without a node scope here, inside
        # the node loop — the same clusters are re-visited for every node;
        # confirm whether per-node iteration is required.
        clusters = AdminConfig.list('ServerCluster').splitlines()
        # Get list of clusters on node
        for cluster in clusters:
            cluster_name = AdminConfig.showAttribute(cluster, "name")
            print '\tCluster: ' + cluster_name
            cluster_apps += AdminApp.list("WebSphere:cell=%s,node=%s,cluster=%s" % (cname, nname, cluster_name)).splitlines()
            print '\t\t', cluster_apps
            cluster_entries += ['"%s": [ %s ]' % (cluster_name, ','.join([ '"%s"' % app for app in cluster_apps]))]
# Final machine-readable output: consumed by the caller as JSON.
print '{"servers": {%s}, "clusters": {%s}}' % (','.join(server_entries), ','.join(cluster_entries))
| {
"content_hash": "acc4ba0f281697b1261cd210548720cd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 116,
"avg_line_length": 39.473684210526315,
"alnum_prop": 0.6593333333333333,
"repo_name": "electric-cloud/EC-WebSphere",
"id": "0b74d71dae2d428787d30c3871760d2929c0dadf",
"size": "2119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/resources/project/wsadmin_scripts/discover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASL",
"bytes": "180220"
},
{
"name": "Batchfile",
"bytes": "2628"
},
{
"name": "CSS",
"bytes": "2058"
},
{
"name": "Groovy",
"bytes": "917511"
},
{
"name": "Java",
"bytes": "123534"
},
{
"name": "Perl",
"bytes": "895572"
},
{
"name": "Python",
"bytes": "91802"
},
{
"name": "Raku",
"bytes": "10613"
},
{
"name": "Shell",
"bytes": "20136"
}
],
"symlink_target": ""
} |
"""A vtctld2 webdriver test that tests the different views of status page."""
import logging
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
import unittest
from vtproto import vttest_pb2
from vttest import environment as vttest_environment
from vttest import local_database
from vttest import mysql_flavor
import environment
import utils
def setUpModule():
  """Start a headless Xvfb display (unless --no-xvfb) before any test runs."""
  try:
    if utils.options.xvfb:
      try:
        # This will be killed automatically by utils.kill_sub_processes()
        utils.run_bg(['Xvfb', ':15', '-ac'])
        os.environ['DISPLAY'] = ':15'
      except OSError as err:
        # Despite running in background, utils.run_bg() will throw immediately
        # if the Xvfb binary is not found.
        logging.error(
            "Can't start Xvfb (will try local DISPLAY instead): %s", err)
  except BaseException:
    # Explicit BaseException instead of a bare except (flake8 E722):
    # clean up module-level state, then propagate whatever was raised.
    tearDownModule()
    raise
def tearDownModule():
  """Run mandatory teardown and, unless skipped, remove temp files."""
  utils.required_teardown()
  if not utils.options.skip_teardown:
    utils.remove_tmp_files()
class TestVtctldWeb(unittest.TestCase):
  """Webdriver tests for the different views of the vtctld2 status page."""

  @classmethod
  def setUpClass(cls):
    """Set up two keyspaces: one unsharded, one with two shards."""
    if os.environ.get('CI') == 'true' and os.environ.get('TRAVIS') == 'true':
      username = os.environ['SAUCE_USERNAME']
      access_key = os.environ['SAUCE_ACCESS_KEY']
      capabilities = {}
      capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER']
      capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER']
      capabilities['platform'] = 'Linux'
      capabilities['browserName'] = 'chrome'
      hub_url = '%s:%s@localhost:4445' % (username, access_key)
      cls.driver = webdriver.Remote(
          desired_capabilities=capabilities,
          command_executor='http://%s/wd/hub' % hub_url)
    else:
      os.environ['webdriver.chrome.driver'] = os.path.join(
          os.environ['VTROOT'], 'dist')
      # Only testing against Chrome for now
      cls.driver = webdriver.Chrome()

    topology = vttest_pb2.VTTestTopology()
    topology.cells.append('test')
    topology.cells.append('test2')
    keyspace = topology.keyspaces.add(name='test_keyspace')
    keyspace.replica_count = 2
    keyspace.rdonly_count = 2
    keyspace.shards.add(name='-80')
    keyspace.shards.add(name='80-')
    keyspace2 = topology.keyspaces.add(name='test_keyspace2')
    keyspace2.shards.add(name='0')
    keyspace2.replica_count = 2
    keyspace2.rdonly_count = 1

    port = environment.reserve_ports(1)
    vttest_environment.base_port = port
    mysql_flavor.set_mysql_flavor(None)

    cls.db = local_database.LocalDatabase(
        topology, '', False, None,
        os.path.join(os.environ['VTTOP'], 'web/vtctld2/dist'),
        os.path.join(os.environ['VTTOP'], 'test/vttest_schema/default'))
    cls.db.setup()
    cls.vtctld_addr = 'http://localhost:%d' % cls.db.config()['port']
    utils.pause('Paused test after vtcombo was started.\n'
                'For manual testing, connect to vtctld: %s' % cls.vtctld_addr)

  @classmethod
  def tearDownClass(cls):
    cls.db.teardown()
    cls.driver.quit()

  def _get_dropdown_options(self, group):
    """Return the option labels of the dropdown with id *group*."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    dropdown = status_content.find_element_by_id(group)
    return [op.text for op in
            dropdown.find_elements_by_tag_name('option')]

  def _get_dropdown_selection(self, group):
    """Return the currently selected label of the dropdown with id *group*."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    dropdown = status_content.find_element_by_id(group)
    return dropdown.find_element_by_tag_name('label').text

  def _change_dropdown_option(self, dropdown_id, dropdown_value):
    """Click the option labeled *dropdown_value* in the given dropdown."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    dropdown = status_content.find_element_by_id(dropdown_id)
    dropdown.click()
    options = dropdown.find_elements_by_tag_name('li')
    for op in options:
      if op.text == dropdown_value:
        logging.info('dropdown %s: option %s clicked', dropdown_id, op.text)
        op.click()
        break

  def _check_dropdowns(self, keyspaces, selected_keyspace, cells, selected_cell,
                       types, selected_type, metrics, selected_metric):
    """Checking that all dropdowns have the correct options and selection."""
    keyspace_options = self._get_dropdown_options('keyspace')
    keyspace_selected = self._get_dropdown_selection('keyspace')
    logging.info('Keyspace options: %s Keyspace selected: %s',
                 ', '.join(keyspace_options), keyspace_selected)
    self.assertListEqual(keyspaces, keyspace_options)
    self.assertEqual(selected_keyspace, keyspace_selected)

    cell_options = self._get_dropdown_options('cell')
    cell_selected = self._get_dropdown_selection('cell')
    logging.info('Cell options: %s Cell Selected: %s',
                 ', '.join(cell_options), cell_selected)
    self.assertListEqual(cells, cell_options)
    self.assertEqual(selected_cell, cell_selected)

    type_options = self._get_dropdown_options('type')
    type_selected = self._get_dropdown_selection('type')
    # Bug fix: this log line previously repeated the cell values.
    logging.info('Type options: %s Type Selected: %s',
                 ', '.join(type_options), type_selected)
    self.assertListEqual(types, type_options)
    self.assertEqual(selected_type, type_selected)

    metric_options = self._get_dropdown_options('metric')
    metric_selected = self._get_dropdown_selection('metric')
    logging.info('metric options: %s metric Selected: %s',
                 ', '.join(metric_options), metric_selected)
    self.assertListEqual(metrics, metric_options)
    self.assertEqual(selected_metric, metric_selected)

  def _check_heatmaps(self, selected_keyspace):
    """Checking that the view has the correct number of heatmaps drawn."""
    status_content = self.driver.find_element_by_tag_name('vt-status')
    keyspaces = status_content.find_elements_by_tag_name('vt-heatmap')
    logging.info('Number of keyspaces found: %d', len(keyspaces))

    if selected_keyspace == 'all':
      available_keyspaces = self._get_dropdown_options('keyspace')
      # One heatmap per real keyspace ('all' itself is not a keyspace).
      self.assertEqual(len(keyspaces), len(available_keyspaces)-1)
      for ks in keyspaces:
        heading = ks.find_element_by_id('keyspaceName')
        logging.info('Keyspace name: %s', heading.text)
        try:
          ks.find_element_by_id(heading.text)
        except NoSuchElementException:
          self.fail('Cannot get keyspace')
        self.assertIn(heading.text, available_keyspaces)
    else:
      # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
      self.assertEqual(len(keyspaces), 1)
      heading = keyspaces[0].find_element_by_id('keyspaceName')
      logging.info('Keyspace name: %s', heading.text)
      try:
        keyspaces[0].find_element_by_id(heading.text)
      except NoSuchElementException:
        self.fail('Cannot get keyspace')
      self.assertEqual(heading.text, selected_keyspace)

  def _check_new_view(
      self, keyspaces, selected_keyspace, cells, selected_cell, types,
      selected_type, metrics, selected_metric):
    """Checking the dropdowns and heatmaps for each newly routed view."""
    logging.info('Testing realtime stats view')
    self._check_dropdowns(keyspaces, selected_keyspace, cells, selected_cell,
                          types, selected_type, metrics, selected_metric)
    self._check_heatmaps(selected_keyspace)

  def test_realtime_stats(self):
    """Walk through dropdown combinations and verify the rendered view."""
    logging.info('Testing realtime stats view')

    # Navigate to the status page from initial app.
    # TODO(thompsonja): Fix this once direct navigation works (after adding
    # support for web-dir2 flag)
    self.driver.get(self.vtctld_addr)
    status_button = self.driver.find_element_by_partial_link_text('Status')
    status_button.click()
    wait = WebDriverWait(self.driver, 10)
    wait.until(expected_conditions.visibility_of_element_located(
        (By.TAG_NAME, 'vt-status')))

    # (dropdown to change, value to pick, expected keyspace/cell/type).
    test_cases = [
        (None, None, 'all', 'all', 'all'),
        ('type', 'REPLICA', 'all', 'all', 'REPLICA'),
        ('cell', 'test2', 'all', 'test2', 'REPLICA'),
        ('keyspace', 'test_keyspace', 'test_keyspace', 'test2', 'REPLICA'),
        ('cell', 'all', 'test_keyspace', 'all', 'REPLICA'),
        ('type', 'all', 'test_keyspace', 'all', 'all'),
        ('cell', 'test2', 'test_keyspace', 'test2', 'all'),
        ('keyspace', 'all', 'all', 'test2', 'all'),
    ]

    for (dropdown_id, dropdown_val, keyspace, cell, tablet_type) in test_cases:
      logging.info('Routing to new %s-%s-%s view', keyspace, cell, tablet_type)
      if dropdown_id and dropdown_val:
        self._change_dropdown_option(dropdown_id, dropdown_val)
      tablet_type_options = ['all', 'MASTER', 'REPLICA', 'RDONLY']
      if cell == 'test2':
        # Cell test2 hosts no MASTER tablets in this topology.
        tablet_type_options = ['all', 'REPLICA', 'RDONLY']
      self._check_new_view(keyspaces=['all', 'test_keyspace', 'test_keyspace2'],
                           selected_keyspace=keyspace,
                           cells=['all', 'test', 'test2'],
                           selected_cell=cell,
                           types=tablet_type_options,
                           selected_type=tablet_type,
                           metrics=['lag', 'qps', 'health'],
                           selected_metric='health'
                          )
def add_test_options(parser):
  """Register this test's extra command-line flags on *parser*."""
  parser.add_option(
      '--no-xvfb',
      action='store_false',
      dest='xvfb',
      default=True,
      help='Use local DISPLAY instead of headless Xvfb mode.')
if __name__ == '__main__':
  # Delegate to the shared test harness, registering our extra CLI options.
  utils.main(test_options=add_test_options)
| {
"content_hash": "6cc841e06212f475dcb12a891a07e44e",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 80,
"avg_line_length": 40.847457627118644,
"alnum_prop": 0.6529045643153527,
"repo_name": "erzel/vitess",
"id": "ff91f8e04909ff6d6dd18464704aea709f0def4d",
"size": "9662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/vtctld2_web_status_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10253"
},
{
"name": "CSS",
"bytes": "213341"
},
{
"name": "Go",
"bytes": "5244097"
},
{
"name": "HTML",
"bytes": "53738"
},
{
"name": "Java",
"bytes": "721903"
},
{
"name": "JavaScript",
"bytes": "41385"
},
{
"name": "Liquid",
"bytes": "6896"
},
{
"name": "Makefile",
"bytes": "7962"
},
{
"name": "PHP",
"bytes": "1001625"
},
{
"name": "Protocol Buffer",
"bytes": "99498"
},
{
"name": "Python",
"bytes": "867427"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "41201"
},
{
"name": "TypeScript",
"bytes": "134312"
},
{
"name": "Yacc",
"bytes": "21577"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import defaultdict
from checkmate.lib.analysis.base import BaseAnalyzer
import logging
# Module-level logger, named after this module for hierarchical filtering.
logger = logging.getLogger(__name__)
class FormatAnalyzer(BaseAnalyzer):
    """Computes simple size metrics (line/character counts) per file revision
    and aggregates/diffs them across snapshots."""

    def diff_summary(self, summary_a, summary_b):
        """Return the change in total line/character counts from *summary_a*
        to *summary_b* (both produced by :meth:`summarize`)."""
        return {
            'd_number_of_lines' : summary_b['total_number_of_lines']-summary_a['total_number_of_lines'],
            'd_number_of_characters' : summary_b['total_number_of_characters']-summary_a['total_number_of_characters'],
        }

    def summarize(self, items):
        """Aggregate per-file ``stats`` dicts into snapshot-wide totals.

        Items without a ``stats`` entry, or whose stats lack the line/char
        counts, are skipped. Averages and ``number_of_files`` are present
        only when at least one file contributed.
        """
        # A plain dict suffices: every key is assigned explicitly, so the
        # original defaultdict's auto-creation was never exercised.
        stats = {
            'total_number_of_lines': 0,
            'total_number_of_characters': 0,
        }
        cnt = 0
        for item_stats in (item['stats'] for item in items if 'stats' in item):
            if 'number_of_lines' in item_stats and 'number_of_characters' in item_stats:
                stats['total_number_of_lines'] += item_stats['number_of_lines']
                stats['total_number_of_characters'] += item_stats['number_of_characters']
                cnt += 1
        if cnt:
            stats['average_number_of_lines'] = stats['total_number_of_lines'] / float(cnt)
            stats['average_number_of_characters'] = stats['total_number_of_characters'] / float(cnt)
            stats['number_of_files'] = cnt
        return stats

    def analyze(self, file_revision):
        """Return ``{'stats': ..., 'issues': []}`` for one file revision.

        Unreadable files are logged and produce empty stats rather than
        failing the whole analysis run.
        """
        stats = {}
        issues = []
        try:
            file_content = file_revision.get_file_content()
            stats['number_of_lines'] = len(file_content.decode("utf-8","ignore").split("\n"))
            stats['number_of_characters'] = len(file_content.decode("utf-8","ignore"))
        except KeyboardInterrupt:
            raise
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/GeneratorExit
            # are no longer swallowed; any read/decode failure is logged.
            logger.warning("Cannot read source file: %s" % file_revision.path)
        return {
            'stats' : stats,
            'issues' : issues,
        }
| {
"content_hash": "1ae8dabad72f521885e8268164c79459",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 123,
"avg_line_length": 33.59649122807018,
"alnum_prop": 0.5827676240208878,
"repo_name": "quantifiedcode/checkmate",
"id": "fda76c20032ad2f46413ec41769d89c9af66eaa1",
"size": "1940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkmate/contrib/plugins/python/metrics/analyzer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "227"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "305549"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from celery import Celery
# instantiate Celery object
# celery = Celery(include=['abstar.utils.vdj'])
# Tasks are discovered in the modules listed under ``include``.
celery = Celery(include=['abstar.core.abstar'])
# import celery config file
# (broker/backend settings live in the abstar.celeryconfig module)
celery.config_from_object('abstar.celeryconfig')
# Allow launching a worker by running this module directly.
if __name__ == '__main__':
    celery.start()
| {
"content_hash": "1fc8215c36ab89aba8c1f274c08c670e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 19.9375,
"alnum_prop": 0.7084639498432602,
"repo_name": "briney/abstar",
"id": "3e1a553d9af1ac2506c662f96655d5c52f796f4e",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abstar/utils/queue/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2845"
},
{
"name": "Python",
"bytes": "394297"
}
],
"symlink_target": ""
} |
import push.ssh
# Event names registered by the @event_wrapped decorator below
# ("<fn>_began", "<fn>_ended", "<fn>_aborted"); Deployer.__init__ creates
# one Event attribute per name listed here.
auto_events = []
class PushAborted(Exception):
    "Raised when the deploy is cancelled."
    def __init__(self, reason):
        # Pass reason through to Exception so .args is populated and the
        # exception survives a pickle round-trip (a custom __init__ that
        # skips the base constructor breaks exception pickling).
        super(PushAborted, self).__init__(reason)
        self.reason = reason

    def __str__(self):
        return self.reason
class Event(object):
    """An event that can have an arbitrary number of listeners that get called
    when the event fires. Listeners are kept in a set, so registering the
    same listener twice has no effect and firing order is unspecified."""

    def __init__(self, parent):
        self.parent = parent
        self.listeners = set()

    def register_listener(self, listener):
        """Add *listener* and return it (usable as a decorator).

        Renamed from ``callable`` to avoid shadowing the builtin.
        """
        self.listeners.add(listener)
        return listener

    def fire(self, *args, **kwargs):
        """Invoke every listener as listener(parent, *args, **kwargs)."""
        for listener in self.listeners:
            listener(self.parent, *args, **kwargs)

    # An Event instance can be called directly to register a listener.
    __call__ = register_listener
def event_wrapped(fn):
    """Wraps a function "fn" and fires the "fn_began" event before entering
    the function, "fn_ended" after succesfully returning, and "fn_aborted"
    on exception. The three event names are recorded in auto_events so
    Deployer.__init__ can create the matching Event attributes."""
    began_name = fn.__name__ + "_began"
    ended_name = fn.__name__ + "_ended"
    aborted_name = fn.__name__ + "_aborted"
    auto_events.extend((began_name, ended_name, aborted_name))

    def proxy(self, *args, **kwargs):
        getattr(self, began_name).fire(*args, **kwargs)
        try:
            result = fn(self, *args, **kwargs)
        except Exception as e:
            # "except Exception as e" (instead of the Python-2-only
            # "except Exception, e") is valid on Python 2.6+ and 3.
            getattr(self, aborted_name).fire(e)
            raise
        else:
            getattr(self, ended_name).fire(*args, **kwargs)
        return result
    return proxy
class Deployer(object):
def __init__(self, config, args, log, host_source):
self.config = config
self.args = args
self.log = log
self.host_source = host_source
self.deployer = push.ssh.SshDeployer(config, args, log)
for event_name in auto_events:
setattr(self, event_name, Event(self))
def _run_fetch_on_host(self, host, origin="origin"):
for repo in self.args.fetches:
self.deployer.run_deploy_command(host, "fetch", repo, origin)
def _deploy_to_host(self, host):
for repo in self.args.deploys:
self.deployer.run_deploy_command(host, "deploy", repo,
self.args.revisions[repo])
@event_wrapped
def synchronize(self):
for repo in self.args.fetches:
self.deployer.run_build_command("synchronize", repo)
self._run_fetch_on_host(self.config.deploy.build_host)
@event_wrapped
def resolve_refs(self):
for repo in self.args.deploys:
default_ref = self.config.default_refs.get(repo, "origin/master")
ref_to_deploy = self.args.revisions.get(repo, default_ref)
revision = self.deployer.run_build_command("get-revision", repo,
ref_to_deploy,
display_output=False)
self.args.revisions[repo] = revision.strip()
@event_wrapped
def build_static(self):
self.deployer.run_build_command("build-static")
@event_wrapped
def deploy_to_build_host(self):
self._deploy_to_host(self.config.deploy.build_host)
@event_wrapped
def process_host(self, host):
self._run_fetch_on_host(host)
self._deploy_to_host(host)
for command in self.args.deploy_commands:
self.deployer.run_deploy_command(host, *command)
def needs_static_build(self, repo):
try:
self.deployer.run_build_command("needs-static-build", repo,
display_output=False)
except push.ssh.SshError:
return False
else:
return True
@event_wrapped
def push(self):
try:
self._push()
finally:
self.deployer.shutdown()
ABORT = "abort"
RETRY = "retry"
CONTINUE = "continue"
def host_error_prompt(self, host, error):
return self.ABORT
@event_wrapped
def prompt_error(self, host, error):
return self.host_error_prompt(host, error)
def _push(self):
if self.args.fetches:
self.synchronize()
if self.args.deploys:
self.resolve_refs()
self.deploy_to_build_host()
if self.args.build_static:
build_static = False
for repo in self.args.deploys:
if repo == "public" or self.needs_static_build(repo):
build_static = True
break
if build_static:
self.build_static()
self.args.deploy_commands.append(["fetch-names"])
i = 0
while i < len(self.args.hosts):
host = self.args.hosts[i]
i += 1
try:
self.process_host(host)
except (push.ssh.SshError, IOError) as e:
if self.host_source.should_host_be_alive(host):
response = self.prompt_error(host, e)
if response == self.ABORT:
raise
elif response == self.CONTINUE:
continue
elif response == self.RETRY:
# rewind one host and try again
i -= 1
continue
else:
self.log.warning("Host %r appears to have been terminated."
" ignoring errors and continuing." % host)
    def cancel_push(self, reason):
        """Abort the push by raising PushAborted carrying *reason*."""
        raise PushAborted(reason)
| {
"content_hash": "c331fe7e3a784e4ca7422337f315f7d7",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 31.083798882681563,
"alnum_prop": 0.5460100647016535,
"repo_name": "reddit/push",
"id": "870db629b6f972d2b620b937623fdfc7a254b5ed",
"size": "5564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push/deploy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "47032"
}
],
"symlink_target": ""
} |
"""Django-specific customization."""
from __future__ import absolute_import, unicode_literals
import os
import sys
import warnings
from datetime import datetime
from importlib import import_module
from kombu.utils.imports import symbol_by_name
from kombu.utils.objects import cached_property
from celery import _state, signals
from celery.exceptions import FixupWarning, ImproperlyConfigured
__all__ = ('DjangoFixup', 'fixup')
ERR_NOT_INSTALLED = """\
Environment variable DJANGO_SETTINGS_MODULE is defined
but Django isn't installed. Won't apply Django fix-ups!
"""
def _maybe_close_fd(fh):
try:
os.close(fh.fileno())
except (AttributeError, OSError, TypeError):
# TypeError added for celery#962
pass
def _verify_django_version(django):
if django.VERSION < (1, 11):
raise ImproperlyConfigured('Celery 4.x requires Django 1.11 or later.')
def fixup(app, env='DJANGO_SETTINGS_MODULE'):
    """Install Django fixup if settings module environment is set.

    Returns the installed DjangoFixup, or None when not applicable.
    """
    SETTINGS_MODULE = os.environ.get(env)
    # Only engage when Django settings are configured and the app's loader
    # is not already Django-aware.
    if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():
        try:
            import django # noqa
        except ImportError:
            # Settings env var set but Django is missing: warn, don't fail.
            warnings.warn(FixupWarning(ERR_NOT_INSTALLED))
        else:
            _verify_django_version(django)
            return DjangoFixup(app).install()
class DjangoFixup(object):
    """Fixup installed when using Django.

    Wires Django settings/timezone helpers into the Celery app and defers
    the worker-only pieces to DjangoWorkerFixup.
    """
    def __init__(self, app):
        self.app = app
        # Register this app as the default if none has been set yet.
        if _state.default_app is None:
            self.app.set_default()
        self._worker_fixup = None
    def install(self):
        """Hook Django into the app's loader and worker/import signals."""
        # Need to add project directory to path.
        # The project directory has precedence over system modules,
        # so we prepend it to the path.
        sys.path.insert(0, os.getcwd())
        self._settings = symbol_by_name('django.conf:settings')
        self.app.loader.now = self.now
        signals.import_modules.connect(self.on_import_modules)
        signals.worker_init.connect(self.on_worker_init)
        return self
    @property
    def worker_fixup(self):
        # Created lazily so client-only processes never build it.
        if self._worker_fixup is None:
            self._worker_fixup = DjangoWorkerFixup(self.app)
        return self._worker_fixup
    @worker_fixup.setter
    def worker_fixup(self, value):
        self._worker_fixup = value
    def on_import_modules(self, **kwargs):
        # call django.setup() before task modules are imported
        self.worker_fixup.validate_models()
    def on_worker_init(self, **kwargs):
        self.worker_fixup.install()
    def now(self, utc=False):
        """Return the current datetime, via Django's timezone helper unless utc."""
        return datetime.utcnow() if utc else self._now()
    def autodiscover_tasks(self):
        """Return the names of all installed Django app configs."""
        from django.apps import apps
        return [config.name for config in apps.get_app_configs()]
    @cached_property
    def _now(self):
        return symbol_by_name('django.utils.timezone:now')
class DjangoWorkerFixup(object):
    """Worker-side Django integration: DB/cache connection lifecycle."""
    # Counts close_database() calls when connection reuse is enabled.
    _db_recycles = 0
    def __init__(self, app):
        self.app = app
        self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None)
        self._db = import_module('django.db')
        self._cache = import_module('django.core.cache')
        self._settings = symbol_by_name('django.conf:settings')
        self.interface_errors = (
            symbol_by_name('django.db.utils.InterfaceError'),
        )
        self.DatabaseError = symbol_by_name('django.db:DatabaseError')
    def django_setup(self):
        """Initialise Django (app registry, settings)."""
        import django
        django.setup()
    def validate_models(self):
        """Run Django's system checks after setting Django up."""
        from django.core.checks import run_checks
        self.django_setup()
        run_checks()
    def install(self):
        """Connect DB/cache cleanup to the worker lifecycle signals."""
        signals.beat_embedded_init.connect(self.close_database)
        signals.worker_ready.connect(self.on_worker_ready)
        signals.task_prerun.connect(self.on_task_prerun)
        signals.task_postrun.connect(self.on_task_postrun)
        signals.worker_process_init.connect(self.on_worker_process_init)
        self.close_database()
        self.close_cache()
        return self
    def on_worker_process_init(self, **kwargs):
        # Child process must validate models again if on Windows,
        # or if they were started using execv.
        if os.environ.get('FORKED_BY_MULTIPROCESSING'):
            self.validate_models()
        # close connections:
        # the parent process may have established these,
        # so need to close them.
        # calling db.close() on some DB connections will cause
        # the inherited DB conn to also get broken in the parent
        # process so we need to remove it without triggering any
        # network IO that close() might cause.
        for c in self._db.connections.all():
            if c and c.connection:
                self._maybe_close_db_fd(c.connection)
        # use the _ version to avoid DB_REUSE preventing the conn.close() call
        self._close_database()
        self.close_cache()
    def _maybe_close_db_fd(self, fd):
        # Close only the OS-level fd; InterfaceError means already gone.
        try:
            _maybe_close_fd(fd)
        except self.interface_errors:
            pass
    def on_task_prerun(self, sender, **kwargs):
        """Called before every task."""
        if not getattr(sender.request, 'is_eager', False):
            self.close_database()
    def on_task_postrun(self, sender, **kwargs):
        # See https://groups.google.com/group/django-users/
        # browse_thread/thread/78200863d0c07c6d/
        if not getattr(sender.request, 'is_eager', False):
            self.close_database()
            self.close_cache()
    def close_database(self, **kwargs):
        # No reuse limit configured: always close. Otherwise only close
        # after db_reuse_max * 2 calls (prerun and postrun both call this).
        if not self.db_reuse_max:
            return self._close_database()
        if self._db_recycles >= self.db_reuse_max * 2:
            self._db_recycles = 0
            self._close_database()
        self._db_recycles += 1
    def _close_database(self):
        # Unconditionally close all connections; tolerate already-closed.
        for conn in self._db.connections.all():
            try:
                conn.close()
            except self.interface_errors:
                pass
            except self.DatabaseError as exc:
                str_exc = str(exc)
                if 'closed' not in str_exc and 'not connected' not in str_exc:
                    raise
    def close_cache(self):
        try:
            self._cache.close_caches()
        except (TypeError, AttributeError):
            pass
    def on_worker_ready(self, **kwargs):
        if self._settings.DEBUG:
            warnings.warn('''Using settings.DEBUG leads to a memory
            leak, never use this setting in production environments!''')
| {
"content_hash": "10e6bfd4534a1ae4db69303af5672494",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 32.206896551724135,
"alnum_prop": 0.6211379626797185,
"repo_name": "mdworks2016/work_development",
"id": "fe2a17224e6a3f3056ef0696045244826d8123bb",
"size": "6538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/fixups/django.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from octavia.openstack.common.gettextutils import _LI
from octavia.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
    """Signals an unusable backdoor_port configuration value."""

    def __init__(self, port_range, help_msg, ex):
        details = {'range': port_range, 'ex': ex, 'help': help_msg}
        message = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
                   '%(help)s' % details)
        super(EventletBackdoorConfigValueError, self).__init__(message)
        self.port_range = port_range
def _dont_use_this():
    # Bound to 'exit' and 'quit' in the backdoor shell so an interactive
    # user cannot terminate the whole service process out of habit.
    print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
    """Print index, repr and stack trace for every live greenlet."""
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
        print(i, gt)
        traceback.print_stack(gt.gr_frame)
        print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
    """Start the eventlet backdoor server if backdoor_port is configured.

    Returns the bound port number, or None when the backdoor is disabled.
    """
    # Helpers exposed inside the backdoor REPL session.
    backdoor_locals = {
        'exit': _dont_use_this, # So we don't exit the entire process
        'quit': _dont_use_this, # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }
    if CONF.backdoor_port is None:
        return None
    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook
    sock = _listen('localhost', start_port, end_port, eventlet.listen)
    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
        {'port': port, 'pid': os.getpid()}
    )
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
| {
"content_hash": "572624651482da8db81914239bf83a83",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 77,
"avg_line_length": 31.503875968992247,
"alnum_prop": 0.6304133858267716,
"repo_name": "brandonlogan/octavia",
"id": "3d4a83685fa75187fe88e1760e8c0c95f725963d",
"size": "4775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/openstack/common/eventlet_backdoor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "325983"
}
],
"symlink_target": ""
} |
# Drive the shared ozone model-control harness with one fixed configuration:
# Difference transform, PolyTrend trend, day-of-week seasonality, LSTM cycle.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_DayOfWeek'] , ['LSTM'] );
"content_hash": "c589995c004091440a8cc02034c4591a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 90,
"avg_line_length": 40.75,
"alnum_prop": 0.7177914110429447,
"repo_name": "antoinecarme/pyaf",
"id": "0e84e3133674e4b45490fecfa7f407f795e0e9cb",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""{{ project_name }} URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.urls import include, path
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Route everything through the project's base URLConf plus the mdot app.
urlpatterns = [
    # path('admin/', include(admin.site.urls)),
    # path('api/v1/', include('mdot_rest.urls')),
    path('', include('project.base_urls')),
    path('', include('mdot.urls')),
]
# In development, let Django itself serve user-uploaded media files.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "9cea73e875e4f99a8b9e4b73f8f0a858",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 36.96551724137931,
"alnum_prop": 0.6940298507462687,
"repo_name": "charlon/mdot",
"id": "0202cd0d07ff1c5255ac1def28d4ee6aae436bf4",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8862"
},
{
"name": "HTML",
"bytes": "21389"
},
{
"name": "JavaScript",
"bytes": "283"
},
{
"name": "Python",
"bytes": "47900"
}
],
"symlink_target": ""
} |
"""
This example shows how you can use vdist to use a subdirectory
under your source tree to use as the base for your OS package
You will still be able to use git branching to point to the right
release, since vdist will first checkout the parent, and set apart
the subdirectory after switching to the right branch
"""
from vdist.builder import Builder
from vdist.source import git
# Queue a single build (sourced from a git branch) and run it.
builder = Builder()
builder.add_build(
    name='my great project',
    app='myproject',
    version='1.0',
    source=git(
        uri='https://github.com/someuser/someproject',
        branch='your-release-branch'
    ),
    profile='ubuntu-trusty',
    # specify 'subapp' as the working directory for this build;
    # this means that only the subapp directory will be built and
    # packaged
    # This also means that vdist will look for a pip requirements
    # file in this directory
    working_dir='subapp'
)
builder.build()
| {
"content_hash": "868c0212f9fa4197959f1899b2c0a871",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.7152173913043478,
"repo_name": "objectified/vdist",
"id": "8a2fbc29a8cb8d0548f51f7707d356ba56f2dccc",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/assemble_from_subdirectory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42060"
},
{
"name": "Shell",
"bytes": "13694"
}
],
"symlink_target": ""
} |
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/policy_group -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_policy_group
short_description: Module to manage openshift policy for groups
description:
- Manage openshift policy for groups.
options:
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
namespace:
description:
- The namespace scope
required: false
default: None
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
group:
description:
- The name of the group
required: true
default: None
aliases: []
resource_kind:
description:
- The kind of policy to affect
required: true
default: None
choices: ["role", "cluster-role", "scc"]
aliases: []
resource_name:
description:
- The name of the policy
required: true
default: None
aliases: []
state:
description:
- Desired state of the policy
required: true
default: present
choices: ["present", "absent"]
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: oc adm policy remove-scc-from-group an-scc agroup
oc_adm_policy_group:
group: agroup
resource_kind: scc
resource_name: an-scc
state: absent
- name: oc adm policy add-cluster-role-to-group system:build-strategy-docker agroup
oc_adm_policy_group:
group: agroup
resource_kind: cluster-role
resource_name: system:build-strategy-docker
state: present
'''
# -*- -*- -*- End included fragment: doc/policy_group -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    '''Raised by Yedit for unloadable documents or invalid edit paths.'''
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        """Hold an editable yaml/json document.

        filename/content: source(s) for the document (content wins in load()).
        content_type: 'yaml' or 'json'.
        separator: key-path separator character (default '.').
        backup: when True, write() first copies filename to filename.orig.
        """
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        # Guarantee an editable mapping even when nothing could be loaded.
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' key-path separator character used by get/put/delete '''
        return self._separator
    @separator.setter
    def separator(self, inc_sep):
        ''' replace the key-path separator character '''
        self._separator = inc_sep
    @property
    def yaml_dict(self):
        ''' the parsed document as a python object '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' replace the parsed document wholesale '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key; returns True on success, else None '''
        # An empty key addresses the whole document: clear it in place.
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent container of the final path component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set *item* at *key* (a.b.c notation), creating any missing
            intermediate dictionaries on the way down.
            Returns the value stored at the final component, or None when
            the key is invalid for this data.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Descend to the parent of the last component, vivifying dicts.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))
                data[dict_key] = {}
                data = data[dict_key]
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))
        return data
    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk each component; dict keys and [idx] list indexes both work.
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
    def write(self):
        ''' persist yaml_dict to self.filename; returns (True, yaml_dict) '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        # Optional safety copy of the current on-disk file.
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass
        # Try to use RoundTripDumper if supported.
        # (RoundTripDumper exists only under ruamel.yaml; plain pyyaml
        # raises AttributeError on the attribute lookup and we fall back.)
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
    def load(self, content_type='yaml'):
        ''' parse self.content or the file contents into yaml_dict '''
        contents = self.read()
        if not contents and not self.content:
            return None
        # Explicit content takes precedence over the file on disk.
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
                # Try to use RoundTripLoader if supported.
                # (pyyaml has no RoundTripLoader attribute, so the lookup
                # itself raises AttributeError and we fall back cleanly.)
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))
        return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
    def pop(self, path, key_or_item):
        ''' Remove key_or_item from the dict or list found at *path*.
            Returns (changed?, whole document). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            # Remove the first occurrence of the item, if present.
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # for-else: rval is True only when every (key, val) pair in
                # *value* matched, i.e. the loop completed without break.
                # NOTE(review): entry[key] raises KeyError when *entry*
                # lacks a key present in *value* — confirm callers only
                # pass subsets of existing keys.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval
            return value in entry
        return entry == value
    def append(self, path, value):
        '''append value to a list; returns (changed?, document)'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        # Auto-create an empty list when the path does not exist yet.
        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' Update the container at *path*: merge dicts, or replace/append
            a list element (located by curr_value or index).
            Returns (changed?, document). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            # Locate the element to replace: by current value, or by index.
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' set *value* at *path*; returns (changed?, document).
            Mutates a round-tripped copy first so a failed add leaves the
            original document untouched. '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)
        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)
        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' seed the document with *value* at *path*, but only when the
            backing file does not already exist; returns (changed?, document) '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)
            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)
    @staticmethod
    def get_curr_value(invalue, val_type):
        '''parse *invalue* according to val_type ('yaml'/'json'), else pass through'''
        if invalue is None:
            return None
        curr_value = invalue
        if val_type == 'yaml':
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input (and deprecated in pyyaml >= 5.1) —
            # confirm inputs are trusted or switch to safe_load.
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)
        return curr_value
    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''coerce *inc_value* (usually a str) toward *vtype* via yaml parsing'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
        return inc_value
    @staticmethod
    def process_edits(edits, yamlfile):
        '''Apply each edit dict to *yamlfile* (a Yedit instance) in order.
           Returns {'changed': bool, 'results': [...]} listing the edits
           that actually modified the document.'''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))
                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)
            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)
            else:
                # default action is a plain put at the key
                rval = yamlfile.put(edit['key'], value)
            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})
        return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
    '''perform the idempotent crud operations

    params is the Ansible module parameter dict; expected keys include
    src, backup, separator, state, content, content_type, key, value,
    value_type, update, append, curr_value, curr_value_format, index, edits.
    Returns an Ansible-style result dict ({'changed': ..., 'result': ...}
    or {'failed': True, 'msg': ...}).
    '''
    yamlfile = Yedit(filename=params['src'],
                     backup=params['backup'],
                     separator=params['separator'])

    state = params['state']

    if params['src']:
        rval = yamlfile.load()

        # A missing/unreadable file is only tolerable for state=present,
        # which may create it.
        if yamlfile.yaml_dict is None and state != 'present':
            return {'failed': True,
                    'msg': 'Error opening file [{}].  Verify that the '.format(params['src']) +
                           'file exists, that it is has correct permissions, and is valid yaml.'}

    if state == 'list':
        # content, when given, replaces whatever was loaded from src
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        # NOTE(review): if neither src nor key is provided, rval may be
        # referenced before assignment here — presumably module arg
        # validation prevents that combination; confirm upstream.
        if params['key']:
            rval = yamlfile.get(params['key']) or {}

        return {'changed': False, 'result': rval, 'state': state}

    elif state == 'absent':
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        # update=True removes a single value from a list; otherwise the
        # whole key is deleted.
        if params['update']:
            rval = yamlfile.pop(params['key'], params['value'])
        else:
            rval = yamlfile.delete(params['key'])

        # persist only when something changed and a file was given
        if rval[0] and params['src']:
            yamlfile.write()

        return {'changed': rval[0], 'result': rval[1], 'state': state}

    elif state == 'present':
        # check if content is different than what is in the file
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])

            # We had no edits to make and the contents are the same
            if yamlfile.yaml_dict == content and \
               params['value'] is None:
                return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

            yamlfile.yaml_dict = content

        # If we were passed a key and value we
        # encapsulate them in a one-element edits list so the same
        # process_edits code path handles both input styles.
        # Key, Value passed to the module : Converted to Edits list #
        edits = []
        _edit = {}
        if params['value'] is not None:
            _edit['value'] = params['value']
            _edit['value_type'] = params['value_type']
            _edit['key'] = params['key']

            if params['update']:
                _edit['action'] = 'update'
                _edit['curr_value'] = params['curr_value']
                _edit['curr_value_format'] = params['curr_value_format']
                _edit['index'] = params['index']

            elif params['append']:
                _edit['action'] = 'append'

            edits.append(_edit)

        elif params['edits'] is not None:
            edits = params['edits']

        if edits:
            results = Yedit.process_edits(edits, yamlfile)

            # if there were changes and a src provided to us we need to write
            if results['changed'] and params['src']:
                yamlfile.write()

            return {'changed': results['changed'], 'result': results['results'], 'state': state}

        # no edits to make
        if params['src']:
            # pylint: disable=redefined-variable-type
            rval = yamlfile.write()
            return {'changed': rval[0],
                    'result': rval[1],
                    'state': state}

        # We were passed content but no src, key or value, or edits.  Return contents in memory
        return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
    return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised when an openshiftcli operation fails.'''
# Extra directories searched for `oc` beyond $PATH: ansible/sudo may strip
# /usr/local/bin and ~/bin (https://github.com/openshift/openshift-ansible/issues/3410)
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    binary = 'oc'

    try:
        # python3: delegate the lookup to shutil.which
        found = shutil.which(binary, path=os.pathsep.join(search_dirs))
        if found is not None:
            return found
    except AttributeError:
        # interpreter without shutil.which: naive first-match scan
        for directory in search_dirs:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                return candidate

    # nothing found: fall back to the bare name and let exec resolve it
    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

        namespace: namespace passed as `-n` to oc (sentinel values
                   'none'/'empty'/None suppress the flag)
        kubeconfig: path to a kubeconfig; a temp copy is used so the
                    original is never modified
        verbose: echo commands and output to stdout
        all_namespaces: pass --all-namespaces instead of -n
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            # nothing to replace; propagate the lookup result as-is
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        if any([change[0] for change in changes]):
            # at least one key changed: persist and run `oc replace`
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

        Exactly one of name/selector must be provided; raises
        OpenShiftCLIError otherwise.
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # '-f -' makes oc read the template from stdin (input_data)
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode(), stderr.decode()

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Returns a dict with returncode, results, cmd and, on failure,
        stdout/stderr; on JSON decode problems an 'err' key may be set.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # BUG FIX: the sentinel list previously contained the typo 'emtpy',
        # so the intended placeholder value 'empty' was passed to oc as a
        # real namespace.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
            cmds.extend(['-n', self.namespace])

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "results": results,
                "cmd": ' '.join(cmds)}

        if returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        # NOTE(review): this message matches the python2 json
                        # error only; python3 raises "Expecting value: ..." so
                        # decode failures there leave 'err' unset — confirm
                        # whether that is intended before changing.
                        if "No JSON object could be decoded" in verr.args:
                            err = verr.args
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print("STDOUT: {0}".format(stdout))
                print("STDERR: {0}".format(stderr))

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds})

        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {}})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

        data is serialized according to ftype ('yaml', 'json', or raw).
        The file is registered for cleanup at interpreter exit.
        '''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        # BUG FIX: the source file handle was previously opened without ever
        # being closed; use a context manager so it is released promptly on
        # every interpreter, not just under CPython refcounting.
        with open(inc_file) as src_fd:
            Utils._write(tmpfile, src_fd.read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False: the caller owns the (empty) file after this returns
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name (metadata.name); None if absent'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file contents, parsed per sfile_type '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output into a {tool: version} dict '''

        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        #  By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings (<tech>_numeric and <tech>_short) '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed (rpm lookup; requires yum/python2) '''
        import yum

        yum_base = yum.YumBase()
        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
            return True

        return False

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Keys in `skip` (plus caller-provided skip_keys) are ignored because
        they are autogenerated server-side. Returns True when every
        non-skipped key in result_def matches user_def.
        '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # NOTE(review): this print is not debug-guarded
                            # upstream; kept as-is to preserve behavior.
                            print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self):
        '''return all options as a string'''
        return self.stringify()

    def stringify(self):
        ''' return the options hash as cli params in a string '''
        # An option is emitted when it is marked include and its value is
        # truthy, or is an int (so 0 / False still make it onto the CLI).
        return ['--{}={}'.format(key.replace('_', '-'), opt['value'])
                for key, opt in sorted(self.config_options.items())
                if opt['include'] and (opt['value'] or isinstance(opt['value'], int))]
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class RoleBindingConfig(object):
    ''' Handle rolebinding config '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 name,
                 namespace,
                 kubeconfig,
                 group_names=None,
                 role_ref=None,
                 subjects=None,
                 usernames=None):
        ''' constructor for handling rolebinding options

        name/namespace identify the binding; group_names, role_ref,
        subjects and usernames populate the corresponding v1 fields
        (None when not supplied).
        '''
        self.kubeconfig = kubeconfig
        self.name = name
        self.namespace = namespace
        self.group_names = group_names
        self.role_ref = role_ref
        self.subjects = subjects
        self.usernames = usernames
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' create a default rolebinding as a dict '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'RoleBinding'
        self.data['groupNames'] = self.group_names
        # BUG FIX: 'metadata' must be initialized before assigning into it;
        # previously this raised KeyError on every construction.
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        self.data['metadata']['namespace'] = self.namespace

        self.data['roleRef'] = self.role_ref
        self.data['subjects'] = self.subjects
        self.data['userNames'] = self.usernames
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class RoleBinding(Yedit):
    ''' Class to model a rolebinding openshift object

    Wraps a rolebinding document in Yedit and exposes the four v1 fields
    (groupNames, roleRef, subjects, userNames) through lazily-cached
    properties plus add/remove/update/find helpers.
    '''
    group_names_path = "groupNames"
    role_ref_path = "roleRef"
    subjects_path = "subjects"
    user_names_path = "userNames"

    kind = 'RoleBinding'

    def __init__(self, content):
        '''RoleBinding constructor'''
        super(RoleBinding, self).__init__(content=content)
        # lazy caches for the properties below; populated on first access
        self._subjects = None
        self._role_ref = None
        self._group_names = None
        self._user_names = None

    @property
    def subjects(self):
        ''' subjects property '''
        if self._subjects is None:
            self._subjects = self.get_subjects()
        return self._subjects

    @subjects.setter
    def subjects(self, data):
        ''' subjects property setter'''
        self._subjects = data

    @property
    def role_ref(self):
        ''' role_ref property '''
        if self._role_ref is None:
            self._role_ref = self.get_role_ref()
        return self._role_ref

    @role_ref.setter
    def role_ref(self, data):
        ''' role_ref property setter'''
        self._role_ref = data

    @property
    def group_names(self):
        ''' group_names property '''
        if self._group_names is None:
            self._group_names = self.get_group_names()
        return self._group_names

    @group_names.setter
    def group_names(self, data):
        ''' group_names property setter'''
        self._group_names = data

    @property
    def user_names(self):
        ''' user_names property '''
        if self._user_names is None:
            self._user_names = self.get_user_names()
        return self._user_names

    @user_names.setter
    def user_names(self, data):
        ''' user_names property setter'''
        self._user_names = data

    def get_group_names(self):
        ''' return groupNames (empty list when unset)'''
        return self.get(RoleBinding.group_names_path) or []

    def get_user_names(self):
        ''' return usernames (empty list when unset)'''
        return self.get(RoleBinding.user_names_path) or []

    def get_role_ref(self):
        ''' return role_ref (empty dict when unset)'''
        return self.get(RoleBinding.role_ref_path) or {}

    def get_subjects(self):
        ''' return subjects (empty list when unset)'''
        return self.get(RoleBinding.subjects_path) or []

    #### ADD #####
    def add_subject(self, inc_subject):
        ''' add a subject; appends to the cached list or creates the path '''
        if self.subjects:
            # pylint: disable=no-member
            self.subjects.append(inc_subject)
        else:
            self.put(RoleBinding.subjects_path, [inc_subject])

        return True

    def add_role_ref(self, inc_role_ref):
        ''' add a role_ref; only succeeds when none is set yet '''
        if not self.role_ref:
            self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
            return True

        return False

    def add_group_names(self, inc_group_names):
        ''' add a group_names entry '''
        if self.group_names:
            # pylint: disable=no-member
            self.group_names.append(inc_group_names)
        else:
            self.put(RoleBinding.group_names_path, [inc_group_names])

        return True

    def add_user_name(self, inc_user_name):
        ''' add a username '''
        if self.user_names:
            # pylint: disable=no-member
            self.user_names.append(inc_user_name)
        else:
            self.put(RoleBinding.user_names_path, [inc_user_name])

        return True

    #### /ADD #####

    #### Remove #####
    def remove_subject(self, inc_subject):
        ''' remove a subject; False when it was not present '''
        try:
            # pylint: disable=no-member
            self.subjects.remove(inc_subject)
        except ValueError as _:
            return False

        return True

    def remove_role_ref(self, inc_role_ref):
        ''' remove a role_ref; only when the name matches exactly '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref:
            del self.role_ref['name']
            return True

        return False

    def remove_group_name(self, inc_group_name):
        ''' remove a groupname; False when it was not present '''
        try:
            # pylint: disable=no-member
            self.group_names.remove(inc_group_name)
        except ValueError as _:
            return False

        return True

    def remove_user_name(self, inc_user_name):
        ''' remove a username; False when it was not present '''
        try:
            # pylint: disable=no-member
            self.user_names.remove(inc_user_name)
        except ValueError as _:
            return False

        return True

    #### /REMOVE #####

    #### UPDATE #####
    def update_subject(self, inc_subject):
        ''' update a subject in place; falls back to add when absent '''
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return self.add_subject(inc_subject)

        self.subjects[index] = inc_subject

        return True

    def update_group_name(self, inc_group_name):
        ''' update a groupname in place; falls back to add when absent '''
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return self.add_group_names(inc_group_name)

        self.group_names[index] = inc_group_name

        return True

    def update_user_name(self, inc_user_name):
        ''' update a username in place; falls back to add when absent '''
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return self.add_user_name(inc_user_name)

        self.user_names[index] = inc_user_name

        return True

    def update_role_ref(self, inc_role_ref):
        ''' update a role_ref name unconditionally '''
        self.role_ref['name'] = inc_role_ref

        return True

    #### /UPDATE #####

    #### FIND ####
    def find_subject(self, inc_subject):
        ''' find a subject; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return index

        return index

    def find_group_name(self, inc_group_name):
        ''' find a group_name; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return index

        return index

    def find_user_name(self, inc_user_name):
        ''' find a user_name; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return index

        return index

    def find_role_ref(self, inc_role_ref):
        ''' find a role_ref by name; returns the role_ref dict or None '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
            return self.role_ref

        return None
# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/scc.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecurityContextConstraintsConfig(object):
    ''' Handle scc options '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 kubeconfig,
                 options=None,
                 fs_group='MustRunAs',
                 default_add_capabilities=None,
                 groups=None,
                 priority=None,
                 required_drop_capabilities=None,
                 run_as_user='MustRunAsRange',
                 se_linux_context='MustRunAs',
                 supplemental_groups='RunAsAny',
                 users=None,
                 annotations=None):
        ''' constructor for handling scc options

        sname: scc name; options, when given, seeds the allow* flags and
        replaces the built-in defaults. Remaining args map 1:1 onto the
        corresponding SecurityContextConstraints fields.
        '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.options = options
        self.fs_group = fs_group
        self.default_add_capabilities = default_add_capabilities
        self.groups = groups
        self.priority = priority
        self.required_drop_capabilities = required_drop_capabilities
        self.run_as_user = run_as_user
        self.se_linux_context = se_linux_context
        self.supplemental_groups = supplemental_groups
        self.users = users
        self.annotations = annotations
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' assign the correct properties for a scc dict '''
        # allow options
        if self.options:
            for key, value in self.options.items():
                self.data[key] = value
        else:
            self.data['allowHostDirVolumePlugin'] = False
            self.data['allowHostIPC'] = False
            self.data['allowHostNetwork'] = False
            self.data['allowHostPID'] = False
            self.data['allowHostPorts'] = False
            self.data['allowPrivilegedContainer'] = False
            self.data['allowedCapabilities'] = None

        # version
        self.data['apiVersion'] = 'v1'
        # kind
        self.data['kind'] = 'SecurityContextConstraints'
        # defaultAddCapabilities
        self.data['defaultAddCapabilities'] = self.default_add_capabilities
        # fsGroup
        # BUG FIX: assigning self.data['fsGroup']['type'] raised KeyError when
        # no options dict pre-populated 'fsGroup'; build the dict in one shot,
        # matching the runAsUser/seLinuxContext pattern below.
        self.data['fsGroup'] = {'type': self.fs_group}
        # groups
        self.data['groups'] = []
        if self.groups:
            self.data['groups'] = self.groups
        # metadata
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        if self.annotations:
            for key, value in self.annotations.items():
                self.data['metadata'][key] = value
        # priority
        self.data['priority'] = self.priority
        # requiredDropCapabilities
        self.data['requiredDropCapabilities'] = self.required_drop_capabilities
        # runAsUser
        self.data['runAsUser'] = {'type': self.run_as_user}
        # seLinuxContext
        self.data['seLinuxContext'] = {'type': self.se_linux_context}
        # supplementalGroups
        self.data['supplementalGroups'] = {'type': self.supplemental_groups}
        # users
        self.data['users'] = []
        if self.users:
            self.data['users'] = self.users
# pylint: disable=too-many-instance-attributes,too-many-public-methods,no-member
class SecurityContextConstraints(Yedit):
    ''' Class to wrap the oc command line tools

    Wraps an scc document in Yedit and exposes the users/groups lists
    through lazily-cached properties plus add/remove/update/find helpers.
    '''
    default_add_capabilities_path = "defaultAddCapabilities"
    fs_group_path = "fsGroup"
    groups_path = "groups"
    priority_path = "priority"
    required_drop_capabilities_path = "requiredDropCapabilities"
    run_as_user_path = "runAsUser"
    se_linux_context_path = "seLinuxContext"
    supplemental_groups_path = "supplementalGroups"
    users_path = "users"
    kind = 'SecurityContextConstraints'

    def __init__(self, content):
        '''SecurityContextConstraints constructor'''
        super(SecurityContextConstraints, self).__init__(content=content)
        # lazy caches for the users/groups properties below
        self._users = None
        self._groups = None

    @property
    def users(self):
        ''' users property getter '''
        if self._users is None:
            self._users = self.get_users()
        return self._users

    @property
    def groups(self):
        ''' groups property getter '''
        if self._groups is None:
            self._groups = self.get_groups()
        return self._groups

    @users.setter
    def users(self, data):
        ''' users property setter'''
        self._users = data

    @groups.setter
    def groups(self, data):
        ''' groups property setter'''
        self._groups = data

    def get_users(self):
        '''get scc users (empty list when unset)'''
        return self.get(SecurityContextConstraints.users_path) or []

    def get_groups(self):
        '''get scc groups (empty list when unset)'''
        return self.get(SecurityContextConstraints.groups_path) or []

    def add_user(self, inc_user):
        ''' add a user; appends to the cached list or creates the path '''
        if self.users:
            self.users.append(inc_user)
        else:
            self.put(SecurityContextConstraints.users_path, [inc_user])

        return True

    def add_group(self, inc_group):
        ''' add a group; appends to the cached list or creates the path '''
        if self.groups:
            self.groups.append(inc_group)
        else:
            self.put(SecurityContextConstraints.groups_path, [inc_group])

        return True

    def remove_user(self, inc_user):
        ''' remove a user; False when it was not present '''
        try:
            self.users.remove(inc_user)
        except ValueError as _:
            return False

        return True

    def remove_group(self, inc_group):
        ''' remove a group; False when it was not present '''
        try:
            self.groups.remove(inc_group)
        except ValueError as _:
            return False

        return True

    def update_user(self, inc_user):
        ''' update a user in place; falls back to add when absent '''
        try:
            index = self.users.index(inc_user)
        except ValueError as _:
            return self.add_user(inc_user)

        self.users[index] = inc_user

        return True

    def update_group(self, inc_group):
        ''' update a group in place; falls back to add when absent '''
        try:
            index = self.groups.index(inc_group)
        except ValueError as _:
            return self.add_group(inc_group)

        self.groups[index] = inc_group

        return True

    def find_user(self, inc_user):
        ''' find a user; returns its index or None '''
        index = None
        try:
            index = self.users.index(inc_user)
        except ValueError as _:
            return index

        return index

    def find_group(self, inc_group):
        ''' find a group; returns its index or None '''
        index = None
        try:
            index = self.groups.index(inc_group)
        except ValueError as _:
            return index

        return index
# -*- -*- -*- End included fragment: lib/scc.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_policy_group.py -*- -*- -*-
class PolicyGroupException(Exception):
    '''Raised for PolicyGroup-specific failures.'''
class PolicyGroupConfig(OpenShiftCLIConfig):
    ''' PolicyGroupConfig is a DTO for group related policy.  '''
    # maps the user-facing resource_kind option onto the binding resource
    # that oc actually manages
    _KIND_MAP = {
        'role': 'rolebinding',
        'cluster-role': 'clusterrolebinding',
        'scc': 'scc',
    }

    def __init__(self, namespace, kubeconfig, policy_options):
        super(PolicyGroupConfig, self).__init__(policy_options['name']['value'],
                                                namespace, kubeconfig, policy_options)
        self.kind = self.get_kind()
        self.namespace = namespace

    def get_kind(self):
        ''' return the kind we are working with (None for unknown kinds)'''
        return self._KIND_MAP.get(self.config_options['resource_kind']['value'])
# pylint: disable=too-many-return-statements
class PolicyGroup(OpenShiftCLI):
    ''' Class to handle attaching policies to groups '''

    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for PolicyGroup

            config: PolicyGroupConfig carrying namespace, kubeconfig and the
                parsed module options.
            verbose: passed through to OpenShiftCLI.
        '''
        super(PolicyGroup, self).__init__(config.namespace, config.kubeconfig, verbose)
        self.config = config
        self.verbose = verbose
        # Lazily populated caches backing the properties below.
        self._rolebinding = None
        self._scc = None
        self._cluster_policy_bindings = None
        self._policy_bindings = None

    @property
    def policybindings(self):
        '''Fetch (once) and return the namespace-scoped policy bindings.'''
        if self._policy_bindings is None:
            # Bug fix: this previously queried 'clusterpolicybindings',
            # which made this property an exact duplicate of
            # `clusterpolicybindings` below (its own error message already
            # said "policybindings").
            results = self._get('policybindings', None)
            if results['returncode'] != 0:
                raise OpenShiftCLIError('Could not retrieve policybindings')
            self._policy_bindings = results['results'][0]['items'][0]
        return self._policy_bindings

    @property
    def clusterpolicybindings(self):
        '''Fetch (once) and return the cluster-scoped policy bindings.'''
        if self._cluster_policy_bindings is None:
            results = self._get('clusterpolicybindings', None)
            if results['returncode'] != 0:
                raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
            self._cluster_policy_bindings = results['results'][0]['items'][0]
        return self._cluster_policy_bindings

    @property
    def role_binding(self):
        ''' role_binding getter '''
        return self._rolebinding

    @role_binding.setter
    def role_binding(self, binding):
        ''' role_binding setter '''
        self._rolebinding = binding

    @property
    def security_context_constraint(self):
        ''' security_context_constraint getter '''
        return self._scc

    @security_context_constraint.setter
    def security_context_constraint(self, scc):
        ''' security_context_constraint setter '''
        self._scc = scc

    def get(self):
        '''fetch the desired kind

        Tries the bare resource name first, then the "<name>-binding"
        naming convention; returns the first successful result dict
        (or the last failing one).
        '''
        resource_name = self.config.config_options['name']['value']
        if resource_name == 'cluster-reader':
            # The cluster-reader binding name is pluralized.
            resource_name += 's'

        # oc adm policy add-... creates policy bindings with the name
        # "[resource_name]-binding", however some bindings in the system
        # simply use "[resource_name]". So try both.
        results = self._get(self.config.kind, resource_name)
        if results['returncode'] == 0:
            return results

        # Now try the -binding naming convention.
        return self._get(self.config.kind, resource_name + "-binding")

    def exists_role_binding(self):
        ''' return whether role_binding exists

        On a match, caches the binding on self.role_binding.
        '''
        if self.config.config_options['resource_kind']['value'] == 'cluster-role':
            bindings = self.clusterpolicybindings
        else:
            bindings = self.policybindings

        if bindings is None:
            return False

        for binding in bindings['roleBindings']:
            _rb = binding['roleBinding']
            if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
                    _rb['groupNames'] is not None and \
                    self.config.config_options['group']['value'] in _rb['groupNames']:
                self.role_binding = binding
                return True

        return False

    def exists_scc(self):
        ''' return whether scc exists

        Returns True/False when the scc could be fetched; otherwise returns
        the raw failing result dict so the caller can surface the error.
        '''
        results = self.get()
        if results['returncode'] == 0:
            self.security_context_constraint = SecurityContextConstraints(results['results'][0])

            if self.security_context_constraint.find_group(
                    self.config.config_options['group']['value']) is not None:
                return True

            return False

        return results

    def exists(self):
        '''does the object exist?'''
        resource_kind = self.config.config_options['resource_kind']['value']
        if resource_kind in ('cluster-role', 'role'):
            return self.exists_role_binding()

        if resource_kind == 'scc':
            return self.exists_scc()

        return False

    def perform(self):
        '''perform the add/remove policy action via `oc adm policy`'''
        cmd = ['policy',
               self.config.config_options['action']['value'],
               self.config.config_options['name']['value'],
               self.config.config_options['group']['value']]
        return self.openshift_cmd(cmd, oadm=True)

    @staticmethod
    def run_ansible(params, check_mode):
        '''run the idempotent ansible code'''

        state = params['state']

        if state == 'present':
            action = 'add-' + params['resource_kind'] + '-to-group'
        else:
            action = 'remove-' + params['resource_kind'] + '-from-group'

        nconfig = PolicyGroupConfig(params['namespace'],
                                    params['kubeconfig'],
                                    {'action': {'value': action, 'include': False},
                                     'group': {'value': params['group'], 'include': False},
                                     'resource_kind': {'value': params['resource_kind'], 'include': False},
                                     'name': {'value': params['resource_name'], 'include': False},
                                    })

        policygroup = PolicyGroup(nconfig, params['debug'])

        # Run the oc adm policy group related command.

        ########
        # Delete
        ########
        if state == 'absent':
            if not policygroup.exists():
                return {'changed': False, 'state': 'absent'}

            if check_mode:
                return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}

            api_rval = policygroup.perform()
            if api_rval['returncode'] != 0:
                return {'msg': api_rval}

            # Bug fix: the key was previously the *value* of `state`
            # (producing {'absent': 'absent'}); every other return path
            # uses the literal key 'state'.
            return {'changed': True, 'results': api_rval, 'state': 'absent'}

        if state == 'present':
            ########
            # Create
            ########
            results = policygroup.exists()
            if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
                return {'msg': results}

            if not results:
                if check_mode:
                    return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}

                api_rval = policygroup.perform()
                if api_rval['returncode'] != 0:
                    return {'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': 'present'}

            return {'changed': False, 'state': 'present'}

        return {'failed': True, 'changed': False,
                'results': 'Unknown state passed. %s' % state, 'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_adm_policy_group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_policy_group.py -*- -*- -*-
def main():
    '''
    ansible oc adm module for group policy
    '''
    # Keep the spec separate from module construction for readability.
    argument_spec = dict(
        state=dict(default='present', type='str',
                   choices=['present', 'absent']),
        debug=dict(default=False, type='bool'),
        resource_name=dict(required=True, type='str'),
        namespace=dict(default='default', type='str'),
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        group=dict(required=True, type='str'),
        resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    results = PolicyGroup.run_ansible(module.params, module.check_mode)

    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)


if __name__ == "__main__":
    main()
# -*- -*- -*- End included fragment: ansible/oc_adm_policy_group.py -*- -*- -*-
| {
"content_hash": "e2ad5b5826ff9b90de6b4a2e1150a8aa",
"timestamp": "",
"source": "github",
"line_count": 2147,
"max_line_length": 118,
"avg_line_length": 32.69399161620866,
"alnum_prop": 0.5389491979371456,
"repo_name": "nhr/openshift-ansible",
"id": "35168d1a3ceb2634f1e15bd1c3e48f0681dbe91f",
"size": "71356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/lib_openshift/library/oc_adm_policy_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "Python",
"bytes": "3094390"
},
{
"name": "Roff",
"bytes": "5645"
},
{
"name": "Shell",
"bytes": "99186"
}
],
"symlink_target": ""
} |
"""Validate the sample Glyphs v2/v3 files against their JSON schemas."""
import json

from jsonschema import validate, Draft7Validator
from glyphsLib.parser import Parser

print("----- validate 3")
# Use context managers so every file handle is closed; the original left
# the schema and .glyphs handles open.
with open("Glyphs3FileShema.json") as schema_file:
    schema = json.load(schema_file)
parser = Parser()
with open("../GlyphsFileFormatv3.glyphs", "r", encoding="utf-8") as glyphs_file:
    data = parser.parse(glyphs_file.read())
# Raises jsonschema.ValidationError if the parsed file violates the schema.
validate(instance=data, schema=schema)

print("----- validate 2")
with open("Glyphs2FileShema.json") as schema_file:
    schema = json.load(schema_file)
parser = Parser()
with open("../GlyphsFileFormatv2.glyphs", "r", encoding="utf-8") as glyphs_file:
    data = parser.parse(glyphs_file.read())
# The v2 schema is explicitly validated with the Draft 7 validator.
validator = Draft7Validator(schema)
validator.validate(instance=data)
| {
"content_hash": "bac7e8eeb9349e6adf71fd46c75c189e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 25.52173913043478,
"alnum_prop": 0.706984667802385,
"repo_name": "schriftgestalt/GlyphsSDK",
"id": "15e64aa1cf311e2a4898ff6dada94422b4f2aaf8",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/Glyphs3",
"path": "GlyphsFileFormat/validator/validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47278"
},
{
"name": "HTML",
"bytes": "710659"
},
{
"name": "Objective-C",
"bytes": "445848"
},
{
"name": "Python",
"bytes": "570134"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an integer panel_type to the
    # accessionpanel and batchpanel models and a JSONB "format" field to
    # the action model.

    dependencies = [
        # Must be applied after the previous accession migration.
        ('accession', '0011_auto_20180409_1353'),
    ]

    operations = [
        migrations.AddField(
            model_name='accessionpanel',
            name='panel_type',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='action',
            name='format',
            # NOTE(review): default={'steps': []} is a shared mutable
            # default. Migrations bake the value in so this works here,
            # but confirm the model itself declares a callable default.
            field=django.contrib.postgres.fields.jsonb.JSONField(default={'steps': []}),
        ),
        migrations.AddField(
            model_name='batchpanel',
            name='panel_type',
            field=models.IntegerField(default=0),
        ),
    ]
| {
"content_hash": "ff207b712bf8ee140cc517807ba17a65",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 27.37037037037037,
"alnum_prop": 0.5723951285520974,
"repo_name": "coll-gate/collgate",
"id": "5468b4126e5e7e547d48c7e01b3e466fa5263c15",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/accession/migrations/0012_auto_20180417_1103.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20334"
},
{
"name": "HTML",
"bytes": "245334"
},
{
"name": "JavaScript",
"bytes": "5131841"
},
{
"name": "Python",
"bytes": "1291968"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
} |
"""
__MT_pre__Protocol.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:22:14 2015
__________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from ATOM3Boolean import *
from graph_MT_pre__Protocol import *
class MT_pre__Protocol(ASGNode, ATOM3Type):
    # AToM3-generated abstract-syntax-graph node representing a "Protocol"
    # element in the pre-condition (LHS/NAC) pattern of a transformation
    # rule. Generated code (see the file header): do not hand-edit the
    # attribute plumbing.

    def __init__(self, parent = None):
        # Initialize both base classes and the generated attribute set.
        ASGNode.__init__(self)
        ATOM3Type.__init__(self)
        # Supertypes as declared in the source metamodel.
        self.superTypes = ['MT_pre__NamedElement', 'MT_pre__MetaModelElement_S']
        self.graphClass_ = graph_MT_pre__Protocol
        self.isGraphObjectVisual = True
        if(hasattr(self, '_setHierarchicalLink')):
            self._setHierarchicalLink(False)
        if(hasattr(self, '_setHierarchicalNode')):
            self._setHierarchicalNode(False)
        self.parent = parent
        # NOTE(review): the generator emits each of the three inherited
        # constraint attributes three times (once per supertype level); each
        # later assignment simply overwrites the previous identical one.
        self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        # Matching-machinery attributes: pattern label, pivot passing, and
        # whether subtype matching is enabled for this pattern node.
        self.MT_label__=ATOM3String('', 20)
        self.MT_pivotOut__=ATOM3String('', 20)
        self.MT_pivotIn__=ATOM3String('', 20)
        self.MT_subtypeMatching__=ATOM3Boolean()
        self.MT_subtypeMatching__.setValue(('True', 0))
        self.MT_subtypeMatching__.config = 0
        # NOTE(review): duplicate dict keys below collapse at runtime; the
        # generator mirrors the duplicated attribute assignments above.
        self.generatedAttributes = {'MT_pre__cardinality': ('ATOM3Text', ),
                                    'MT_pre__cardinality': ('ATOM3Text', ),
                                    'MT_pre__cardinality': ('ATOM3Text', ),
                                    'MT_pre__classtype': ('ATOM3Text', ),
                                    'MT_pre__classtype': ('ATOM3Text', ),
                                    'MT_pre__classtype': ('ATOM3Text', ),
                                    'MT_pre__name': ('ATOM3Text', ),
                                    'MT_pre__name': ('ATOM3Text', ),
                                    'MT_pre__name': ('ATOM3Text', ),
                                    'MT_label__': ('ATOM3String', ),
                                    'MT_pivotOut__': ('ATOM3String', ),
                                    'MT_pivotIn__': ('ATOM3String', ),
                                    'MT_subtypeMatching__': ('ATOM3Boolean', ) }
        self.realOrder = ['MT_pre__cardinality','MT_pre__cardinality','MT_pre__cardinality','MT_pre__classtype','MT_pre__classtype','MT_pre__classtype','MT_pre__name','MT_pre__name','MT_pre__name','MT_label__','MT_pivotOut__','MT_pivotIn__','MT_subtypeMatching__']
        self.directEditing = [0,0,0,0,0,0,0,0,0,1,1,1,1]

    def clone(self):
        # Deep-copy this node (attribute values and ASG actions).
        cloneObject = MT_pre__Protocol( self.parent )
        for atr in self.realOrder:
            cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
        ASGNode.cloneActions(self, cloneObject)
        return cloneObject

    def copy(self, other):
        # Copy attribute values (and base-class state) from `other`.
        ATOM3Type.copy(self, other)
        for atr in self.realOrder:
            self.setAttrValue(atr, other.getAttrValue(atr) )
        ASGNode.copy(self, other)

    def preCondition (self, actionID, * params):
        # Delegate to the graphical object, if one is attached.
        if self.graphObject_:
            return self.graphObject_.preCondition(actionID, params)
        else: return None

    def postCondition (self, actionID, * params):
        # Delegate to the graphical object, if one is attached.
        if self.graphObject_:
            return self.graphObject_.postCondition(actionID, params)
        else: return None

    def preAction (self, actionID, * params):
        # On creation, assign the next free pattern label before delegating.
        if actionID == self.CREATE:
            self.autoIncrLabel(params)
        if self.graphObject_:
            return self.graphObject_.preAction(actionID, params)
        else: return None

    def postAction (self, actionID, * params):
        # Delegate to the graphical object, if one is attached.
        if self.graphObject_:
            return self.graphObject_.postAction(actionID, params)
        else: return None

    def QOCA(self, params):
        """
        QOCA Constraint Template
        NOTE: DO NOT select a POST/PRE action trigger
        Constraints will be added/removed in a logical manner by other mechanisms.
        """
        return # <---- Remove this to use QOCA

        # Everything below is intentionally unreachable generator template
        # code, activated by removing the `return` above.
        """ Get the high level constraint helper and solver """
        from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
        oc = OffsetConstraints(self.parent.qocaSolver)

        """
        Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
        For more types of constraints
        """
        oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
        oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)

    def autoIncrLabel(self, params):
        #===============================================================================
        # Auto increment the label
        #===============================================================================

        # If there is already one, ignore
        if not self.MT_label__.isNone(): return

        # Get the maximum label of all MT_pre__ elements
        label = 0
        for nt in self.parent.ASGroot.listNodes:
            if nt.startswith('MT_pre__'):
                for node in self.parent.ASGroot.listNodes[nt]:
                    currLabel = 0
                    try:
                        currLabel = int(node.MT_label__.getValue())
                    # Bare except kept as generated: non-numeric labels are
                    # simply ignored when computing the maximum.
                    except:
                        pass
                    if currLabel > label:
                        label = currLabel

        # The label of this instance will be the max label + 1
        self.MT_label__.setValue(str(label + 1))
| {
"content_hash": "92f1be2a03b9af5decdf231a99ea4de2",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 630,
"avg_line_length": 79.4927536231884,
"alnum_prop": 0.5503190519598906,
"repo_name": "levilucio/SyVOLT",
"id": "642e809f2226e3f2b9a89671291c74aee8e9a2fc",
"size": "10970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/MT_pre__Protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import sys
import psycopg2
from nfldb import connect as nfldb_connect, api_version
from nfldb import Tx, set_timezone
from nfldb.db import _db_name, _mogrify, _bind_type
from nfldb.types import _player_categories, _Enum
from nfldb.team import teams
from nfldbproj.types import ProjEnums
__pdoc__ = {}
nfldbproj_api_version = 1
__pdoc__['nfldbproj_api_version'] = \
"""
The nfldbproj schema version that this library corresponds to. When the schema
version of the database is less than this value, `nfldbproj.connect` will
automatically update the schema to the latest version before doing
anything else.
"""
nfldb_api_version = 7
__pdoc__['nfldb_api_version'] = \
"""
The nfldb schema version that this library corresponds to.
The database will only be updated from this version.
"""
nfldbproj_tables = {
'nfldbproj_meta',
'projection_source',
'fp_system',
'projection_set',
'stat_projection',
'fp_projection',
'dfs_site',
'dfs_salary',
'name_disambiguation',
'fp_score',
'fantasy_player',
}
nfldbproj_types = {
'fantasy_position',
'proj_scope',
'uinteger',
}
def uninstall(db, really_uninstall=False):
    """Remove all traces of nfldb-projections.

    Destructive: drops every nfldbproj table, custom type, and the
    fantasy-player trigger function. Does nothing (beyond printing a
    warning) unless ``really_uninstall=True`` is passed.
    """
    if not really_uninstall:
        print('Uninstall not executed. Pass keyword argument really_uninstall=True to confirm.')
        return

    print('Removing all traces of nfldb-projections...', end='')
    with Tx(db) as cursor:
        cursor.execute('DROP TABLE {}'.format(', '.join(nfldbproj_tables)))
        cursor.execute('DROP TYPE {}'.format(', '.join(nfldbproj_types)))
        cursor.execute('DROP FUNCTION add_fantasy_player() CASCADE')
    print('done.')
def connect(**kwargs):
    """
    A wrapper around nfldb.connect.

    Returns a `psycopg2._psycopg.connection` object from the
    `psycopg2.connect` function. If database is `None`, then `connect`
    will look for a configuration file using `nfldb.config` with
    `config_path`. Otherwise, the connection will use the parameters
    given.

    If `database` is `None` and no config file can be found, then an
    `IOError` exception is raised.

    This function will also compare the current schema version of the
    database against the API version `nfldb.api_version` and assert
    that they are equivalent. If the schema library version is less
    than the the API version, then the schema will be automatically
    upgraded. If the schema version is newer than the library version,
    then this function will raise an assertion error. An assertion
    error will also be raised if the schema version is 0 and the
    database is not empty.

    In addition, a similar updating will be performed for nfldbproj.

    N.B. The `timezone` parameter should be set to a value that
    PostgreSQL will accept. Select from the `pg_timezone_names` view
    to get a list of valid time zones.
    """
    conn = nfldb_connect(**kwargs)

    # Migration: refuse to run against a schema newer than this library,
    # against a mismatched nfldb, or against a version-0 non-empty install.
    nfldbproj_sversion = nfldbproj_schema_version(conn)
    assert nfldbproj_sversion <= nfldbproj_api_version, \
        'nfldbproj library version {} is older than schema with version {}'.format(
            nfldbproj_api_version, nfldbproj_sversion
        )
    assert api_version == nfldb_api_version, \
        'nfldbproj expects nfldb version {}, encountered nfldb version {}'.format(
            nfldb_api_version, api_version
        )
    assert nfldbproj_sversion > 0 or (nfldbproj_sversion == 0 and _nfldbproj_is_empty(conn)), \
        'nfldbproj schema has version 0 but is not empty'

    # Run migrations in UTC, then restore the caller's requested timezone.
    set_timezone(conn, 'UTC')
    _migrate_nfldbproj(conn, nfldbproj_api_version)

    if kwargs.get('timezone'):
        set_timezone(conn, kwargs['timezone'])

    # Bind SQL -> Python casting functions for additional types.
    _bind_type(conn, 'fantasy_position', _Enum._pg_cast(ProjEnums.fantasy_position))
    _bind_type(conn, 'proj_scope', _Enum._pg_cast(ProjEnums.proj_scope))
    return conn
def nfldbproj_schema_version(conn):
    """
    Returns the schema version of the given database. If the version
    is not stored in the database (no nfldbproj_meta table, or no rows),
    then `0` is returned.
    """
    with Tx(conn) as c:
        try:
            # Bug fix: the original passed a parameter sequence to a query
            # with no placeholders, which psycopg2 rejects before the query
            # ever reaches the server.
            c.execute('SELECT nfldbproj_version FROM nfldbproj_meta LIMIT 1')
        except psycopg2.ProgrammingError:
            # Table does not exist yet: schema is uninstalled.
            return 0
        if c.rowcount == 0:
            return 0
        return c.fetchone()['nfldbproj_version']
def _nfldbproj_is_empty(conn):
    """
    Returns `True` if and only if none of the nfldbproj tables exist in the database.
    """
    with Tx(conn) as c:
        c.execute('''
            SELECT table_name from information_schema.tables
            WHERE table_catalog = %s AND table_schema='public'
        ''', [_db_name(conn)])
        table_names = {result['table_name'] for result in c.fetchall()}
    # Bug fix: the original returned bool(nfldbproj_tables - table_names),
    # which is True whenever ANY nfldbproj table is missing (so a partial
    # install counted as "empty"). "Empty" means no nfldbproj table exists.
    return nfldbproj_tables.isdisjoint(table_names)
def _category_sql_field(self):
"""
Get a modified SQL definition of a statistical category column.
Unlike in nfldb's tables, we allow NULL statistics,
in order to differentiate between no projection and a projection of zero.
"""
return '{} {} NULL'.format(self.category_id, 'real' if self.is_real else 'smallint')
# What follows are the migration functions. They follow the naming
# convention "_migrate_nfldbproj_{VERSION}" where VERSION is an integer that
# corresponds to the version that the nfldbproj schema will be after the
# migration function runs. Each migration function is only responsible
# for running the queries required to update schema. It does not
# need to update the schema version.
#
# The migration functions should accept a cursor as a parameter,
# which is created in the _migrate function. In particular,
# each migration function is run in its own transaction. Commits
# and rollbacks are handled automatically.


def _migrate_nfldbproj(conn, to):
    """Run, each in its own transaction, every migration needed to move
    the database from its current version up to version `to`."""
    current = nfldbproj_schema_version(conn)
    assert current <= to

    globs = globals()
    for version in range(current + 1, to + 1):
        fname = '_migrate_nfldbproj_{}'.format(version)
        with Tx(conn) as c:
            assert fname in globs, 'Migration function {} not defined'.format(version)
            globs[fname](c)
            # Record the new version inside the same transaction as the DDL.
            c.execute("UPDATE nfldbproj_meta SET nfldbproj_version = %s", (version,))
def _create_enum(c, enum):
    """Execute a CREATE TYPE ... AS ENUM statement on cursor `c` for the
    given `nfldb.types._Enum` subclass."""
    ddl = '''
    CREATE TYPE {} AS ENUM {}
    '''.format(enum.__name__, _mogrify(c, enum))
    c.execute(ddl)
def _migrate_nfldbproj_1(c):
    # Version 1: create the entire initial nfldbproj schema — custom types,
    # metadata, source/system tables, the fantasy_player mirror of
    # player/team (kept in sync by triggers), DFS tables, projection
    # tables, scoring, and name disambiguation.
    print('Adding nfldb-projections tables to the database...', file=sys.stderr)

    # Custom enum and domain types.
    _create_enum(c, ProjEnums.fantasy_position)
    _create_enum(c, ProjEnums.proj_scope)
    c.execute('''
        CREATE DOMAIN uinteger AS integer
        CHECK (VALUE >= 0)
    ''')

    # Single-row metadata table holding the schema version (starts at 0;
    # _migrate_nfldbproj bumps it after this function succeeds).
    c.execute('''
        CREATE TABLE nfldbproj_meta (
            nfldbproj_version smallint
        )
    ''')
    c.execute('''
        INSERT INTO nfldbproj_meta (nfldbproj_version) VALUES (0)
    ''')

    # Where projections come from (scraper targets).
    c.execute('''
        CREATE TABLE projection_source (
            source_name character varying (100) NOT NULL,
            source_url character varying (255) NULL,
            source_notes text NULL,
            PRIMARY KEY (source_name)
        )
    ''')

    # Fantasy-point scoring systems.
    c.execute('''
        CREATE TABLE fp_system (
            fpsys_name character varying (100) NOT NULL,
            fpsys_url character varying (255) NULL,
            PRIMARY KEY (fpsys_name)
        )
    ''')
    # Handle stat projections by allowing them to reference a fantasy-point system "None".
    c.execute('''
        INSERT INTO fp_system (fpsys_name) VALUES ('None')
    ''')

    # A "fantasy player" is either an individual player or a team DST;
    # the CHECK enforces exactly one of the two, with the id mirroring it.
    c.execute('''
        CREATE TABLE fantasy_player (
            fantasy_player_id character varying (10) NOT NULL,
            player_id character varying (10) NULL,
            dst_team character varying (3) NULL,
            PRIMARY KEY (fantasy_player_id),
            FOREIGN KEY (player_id)
                REFERENCES player (player_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
            FOREIGN KEY (dst_team)
                REFERENCES team (team_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
            CHECK (
                (player_id IS NULL AND dst_team IS NOT NULL AND fantasy_player_id = dst_team) OR
                (player_id IS NOT NULL AND dst_team IS NULL AND fantasy_player_id = player_id)
            )
        )
    ''')

    # Trigger function: mirror every new nfldb player/team row into
    # fantasy_player (and seed a name disambiguation for teams).
    c.execute('''
        CREATE FUNCTION add_fantasy_player() RETURNS trigger AS $add_fantasy_player$
            BEGIN
                IF TG_TABLE_NAME = 'player' THEN
                    INSERT INTO fantasy_player (fantasy_player_id, player_id)
                        VALUES (NEW.player_id, NEW.player_id);
                    RETURN NEW;
                ELSIF TG_TABLE_NAME = 'team' THEN
                    INSERT INTO fantasy_player (fantasy_player_id, dst_team)
                        VALUES (NEW.team_id, NEW.team_id);
                    INSERT INTO name_disambiguation (name_as_scraped, fantasy_player_id)
                        VALUES (NEW.team_id, NEW.team_id);
                    RETURN NEW;
                END IF;
            END;
        $add_fantasy_player$ LANGUAGE plpgsql
    ''')
    c.execute('''
        CREATE TRIGGER fantasy_player_mirror_player
            AFTER INSERT ON player
            FOR EACH ROW
            EXECUTE PROCEDURE add_fantasy_player()
    ''')
    c.execute('''
        CREATE TRIGGER fantasy_player_mirror_team
            AFTER INSERT ON team
            FOR EACH ROW
            EXECUTE PROCEDURE add_fantasy_player()
    ''')

    # Backfill fantasy_player from the rows that already exist in nfldb.
    c.execute('''
        INSERT INTO fantasy_player (fantasy_player_id, player_id)
            SELECT player_id, player_id FROM player
    ''')
    c.execute('''
        INSERT INTO fantasy_player (fantasy_player_id, dst_team)
            SELECT team_id, team_id FROM team
    ''')

    # Daily-fantasy sites, each tied to a (non-"None") scoring system.
    c.execute('''
        CREATE TABLE dfs_site (
            fpsys_name character varying (100) NOT NULL CHECK (fpsys_name != 'None'),
            dfs_name character varying (100) NOT NULL,
            dfs_url character varying (255) NOT NULL,
            PRIMARY KEY (fpsys_name, dfs_name),
            FOREIGN KEY (fpsys_name)
                REFERENCES fp_system (fpsys_name)
                ON DELETE RESTRICT
        )
    ''')

    # Per-week DFS salaries for each fantasy player.
    c.execute('''
        CREATE TABLE dfs_salary (
            fpsys_name character varying (100) NOT NULL,
            dfs_name character varying (100) NOT NULL,
            fantasy_player_id character varying (10) NOT NULL,
            season_year usmallint NOT NULL,
            season_type season_phase NOT NULL,
            week usmallint NOT NULL,
            salary uinteger NOT NULL,
            PRIMARY KEY (fpsys_name, dfs_name, fantasy_player_id, season_year, season_type, week),
            FOREIGN KEY (fpsys_name, dfs_name)
                REFERENCES dfs_site (fpsys_name, dfs_name)
                ON DELETE CASCADE,
            FOREIGN KEY (fantasy_player_id)
                REFERENCES fantasy_player (fantasy_player_id)
                ON DELETE RESTRICT
        )
    ''')

    # One scrape of one source under one scoring system; week is NULL for
    # season-scope projections.
    c.execute('''
        CREATE TABLE projection_set (
            source_name character varying (100) NOT NULL,
            fpsys_name character varying (100) NOT NULL,
            set_id SERIAL NOT NULL,
            projection_scope proj_scope NOT NULL,
            season_year usmallint NOT NULL,
            season_type season_phase NOT NULL,
            week usmallint NULL,
            date_accessed utctime NOT NULL DEFAULT (now() AT TIME ZONE 'utc'),
            known_incomplete bool NOT NULL DEFAULT FALSE,
            PRIMARY KEY (source_name, fpsys_name, set_id),
            FOREIGN KEY (source_name)
                REFERENCES projection_source (source_name)
                ON DELETE CASCADE,
            FOREIGN KEY (fpsys_name)
                REFERENCES fp_system (fpsys_name)
                ON DELETE CASCADE
        )
    ''')
    c.execute('''
        CREATE INDEX projection_set_in_year_phase_week ON projection_set
            (season_year DESC, season_type DESC, week DESC)
    ''')

    # Raw statistical projections (fpsys_name pinned to 'None'); one nullable
    # column per nfldb statistical category, generated by _category_sql_field.
    # NOTE(review): set_id here is SERIAL although it references
    # projection_set.set_id (compare fp_projection below, which uses
    # usmallint) — confirm the sequence default is intended.
    c.execute('''
        CREATE TABLE stat_projection (
            source_name character varying (100) NOT NULL,
            fpsys_name character varying (100) NOT NULL CHECK (fpsys_name = 'None'),
            set_id SERIAL NOT NULL,
            fantasy_player_id character varying (10) NOT NULL,
            gsis_id gameid NULL,
            team character varying (3) NOT NULL,
            fantasy_pos fantasy_position NOT NULL,
            {},
            PRIMARY KEY (source_name, fpsys_name, set_id, fantasy_player_id),
            FOREIGN KEY (source_name)
                REFERENCES projection_source (source_name)
                ON DELETE CASCADE,
            FOREIGN KEY (source_name, fpsys_name, set_id)
                REFERENCES projection_set (source_name, fpsys_name, set_id)
                ON DELETE CASCADE,
            FOREIGN KEY (fantasy_player_id)
                REFERENCES fantasy_player (fantasy_player_id)
                ON DELETE RESTRICT,
            FOREIGN KEY (gsis_id)
                REFERENCES game (gsis_id)
                ON DELETE RESTRICT,
            FOREIGN KEY (team)
                REFERENCES team (team_id)
                ON DELETE RESTRICT
                ON UPDATE CASCADE
        )
    '''.format(
        ', '.join(_category_sql_field(cat) for cat in _player_categories.values())
    ))

    # Fantasy-point projections under a real (non-"None") scoring system.
    c.execute('''
        CREATE TABLE fp_projection (
            source_name character varying (100) NOT NULL,
            fpsys_name character varying (100) NOT NULL CHECK (fpsys_name != 'None'),
            set_id usmallint NOT NULL,
            fantasy_player_id character varying (10) NOT NULL,
            gsis_id gameid NULL,
            team character varying (3) NOT NULL,
            fantasy_pos fantasy_position NOT NULL,
            projected_fp real NOT NULL,
            fp_variance real NULL CHECK (fp_variance >= 0),
            PRIMARY KEY (source_name, fpsys_name, set_id, fantasy_player_id),
            FOREIGN KEY (source_name)
                REFERENCES projection_source (source_name)
                ON DELETE CASCADE,
            FOREIGN KEY (source_name, fpsys_name, set_id)
                REFERENCES projection_set (source_name, fpsys_name, set_id)
                ON DELETE CASCADE,
            FOREIGN KEY (fpsys_name)
                REFERENCES fp_system (fpsys_name)
                ON DELETE CASCADE,
            FOREIGN KEY (fantasy_player_id)
                REFERENCES fantasy_player (fantasy_player_id)
                ON DELETE RESTRICT,
            FOREIGN KEY (gsis_id)
                REFERENCES game (gsis_id)
                ON DELETE RESTRICT,
            FOREIGN KEY (team)
                REFERENCES team (team_id)
                ON DELETE RESTRICT
                ON UPDATE CASCADE
        )
    ''')

    # Actual fantasy points scored, per system/game/player.
    c.execute('''
        CREATE TABLE fp_score (
            fpsys_name character varying (100) NOT NULL CHECK (fpsys_name != 'None'),
            gsis_id gameid NOT NULL,
            fantasy_player_id character varying (10) NOT NULL,
            team character varying (3) NOT NULL,
            fantasy_pos fantasy_position NOT NULL,
            actual_fp real NOT NULL,
            PRIMARY KEY (fpsys_name, gsis_id, fantasy_player_id),
            FOREIGN KEY (fpsys_name)
                REFERENCES fp_system (fpsys_name)
                ON DELETE CASCADE,
            FOREIGN KEY (gsis_id)
                REFERENCES game (gsis_id)
                ON DELETE RESTRICT,
            FOREIGN KEY (fantasy_player_id)
                REFERENCES fantasy_player (fantasy_player_id)
                ON DELETE RESTRICT,
            FOREIGN KEY (team)
                REFERENCES team (team_id)
                ON DELETE RESTRICT
                ON UPDATE CASCADE
        )
    ''')

    # Maps names as scraped from sources to canonical fantasy players.
    c.execute('''
        CREATE TABLE name_disambiguation (
            name_as_scraped character varying (100) NOT NULL,
            fantasy_player_id character varying (10) NOT NULL,
            PRIMARY KEY (name_as_scraped),
            FOREIGN KEY (fantasy_player_id)
                REFERENCES fantasy_player (fantasy_player_id)
                ON DELETE CASCADE
        )
    ''')
    # Name disambiguations for all team names.
    for team_names in teams:
        if team_names[0] == 'UNK':
            continue
        for team_name in team_names:
            if team_name == 'New York':  # 'New York' remains ambiguous.
                continue
            c.execute('''
                INSERT INTO name_disambiguation (name_as_scraped, fantasy_player_id)
                VALUES (%s, %s)
            ''', (team_name, team_names[0]))
| {
"content_hash": "0e038794c1525f819599b17c3ddb5cde",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 98,
"avg_line_length": 36.36285097192225,
"alnum_prop": 0.5936089332382989,
"repo_name": "hsharrison/nfldb-projections",
"id": "b36fd13c4b92948f75960676bb1911c879243a46",
"size": "16836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nfldbproj/db.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37800"
}
],
"symlink_target": ""
} |
"""
testsuite: runner
~~~~~~~~~~~~~~~~~
"""
import unittest
import sys
import protolint
from .base import switchout_streams, restore_streams
class RunnerTests(unittest.TestCase):

    """ Test the entire `protolint` package. """

    def _assert_linter_exits(self, config, target):
        """Run the linter CLI end-to-end and expect it to exit.

        :param config: path to the JSON config file handed to the tool.
        :param target: path to the directory the tool should lint.

        ``restore_streams()`` is placed in a ``finally`` block so that an
        unexpected failure inside the run can no longer leak the hijacked
        stdout/stderr into subsequent tests (the previous per-test copies
        skipped restoration whenever the body raised anything other than
        ``SystemExit``).
        """
        switchout_streams()
        try:
            # The tool always terminates via sys.exit(); assertRaises is used
            # as a context manager so the exit code path is still exercised.
            with self.assertRaises(SystemExit):
                from protolint.__main__ import run_tool
                sys.argv = ['', config, target]
                run_tool()
        finally:
            restore_streams()

    def test_run_linter(self):
        """ test a full run of the linter """
        self._assert_linter_exits('protolint_tests/configs/sample.json',
                                  'protolint_tests/')

    def test_run_linter_custom_config(self):
        """ test a full run of the linter with custom config """
        self._assert_linter_exits('protolint_tests/configs/sample_with_protopaths.json',
                                  'protolint_tests/')

    def test_run_linter_exclusion(self):
        """ test a full run of the linter with exclusions configured """
        self._assert_linter_exits('protolint_tests/configs/sample_with_exclusion.json',
                                  'protolint_tests/')

    def test_run_linter_empty(self):
        """ test a full run of the linter with an empty project """
        self._assert_linter_exits('protolint_tests/configs/sample_empty.json',
                                  'protolint_tests/')

    def test_run_linter_invalid_syntax_unexpected_token(self):
        """ test a full run of the linter with an unexpected token syntax error """
        self._assert_linter_exits('protolint_tests/configs/sample_invalid_syntax.json',
                                  'protolint_tests/')

    def test_run_linter_invalid_syntax_missing_close_bracket(self):
        """ test a full run of the linter with a missing close bracket """
        self._assert_linter_exits('protolint_tests/configs/sample_invalid_syntax2.json',
                                  'protolint_tests/')

    def test_run_linter_unused_import(self):
        """ test a full run of the linter on an unused import """
        self._assert_linter_exits('protolint_tests/configs/sample_unused_import.json',
                                  'protolint_tests/protos/unused_import')

    def test_run_linter_duplicate_enum(self):
        """ test a full run of the linter on a duplicate enum number """
        self._assert_linter_exits('protolint_tests/configs/sample_duplicate_enum_number.json',
                                  'protolint_tests/protos/repeated_enum_number')

    def test_run_linter_enum_first_must_be_zero(self):
        """ test a full run of the linter on an enum that starts at 1 """
        self._assert_linter_exits('protolint_tests/configs/sample_enum_first_must_be_zero.json',
                                  'protolint_tests/protos/enum_first_must_be_zero')

    def test_run_linter_unrecognized_type(self):
        """ test a full run of the linter on an unrecognized type """
        self._assert_linter_exits('protolint_tests/configs/sample_unrecognized_type.json',
                                  'protolint_tests/protos/unrecognized_type')

    def test_run_linter_duplicate_field(self):
        """ test a full run of the linter on a duplicate field number """
        self._assert_linter_exits('protolint_tests/configs/sample_duplicate_field_number.json',
                                  'protolint_tests/protos/repeated_field_number')

    def test_run_linter_already_defined(self):
        """ test a full run of the linter on a field with a duplicate definition """
        self._assert_linter_exits('protolint_tests/configs/sample_already_defined.json',
                                  'protolint_tests/protos/already_defined')
| {
"content_hash": "ea7f4ddc7cdcf8f37a460f5e50192bc0",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 134,
"avg_line_length": 32.348993288590606,
"alnum_prop": 0.6740663900414938,
"repo_name": "sgammon/codeclimate-protobuf",
"id": "f88d6cfc94544a0a46fef299e4eb4b947458451a",
"size": "4845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protolint_tests/test_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3548"
},
{
"name": "Protocol Buffer",
"bytes": "3035"
},
{
"name": "Python",
"bytes": "51958"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
"""
Sandbox Panel Estimators
References
----------
Baltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. Wiley, 2008.
"""
from scikits.statsmodels.tools.tools import categorical
from scikits.statsmodels.regression.linear_model import GLS, WLS
import numpy as np
__all__ = ["PanelModel"]
try:
from pandas import LongPanel, __version__
__version__ >= .1
except:
raise ImportError("While in the sandbox this code depends on the pandas \
package. http://code.google.com/p/pandas/")
def group(X):
    """
    Return numeric codes for the groups in `X`, in order of first
    appearance (no sorting).

    Parameters
    ----------
    X : array-like
        Sequence of (hashable) group labels.

    Returns
    -------
    codes : ndarray
        Float array of the same length as `X`, where each label is replaced
        by the 0-based index of its first occurrence.

    Examples
    --------
    >>> X = np.array(['a','a','b','c','b','c'])
    >>> group(X)
    array([ 0.,  0.,  1.,  2.,  1.,  2.])
    """
    # Fixed: the original doctest referenced an undefined name ``g``;
    # ``range`` replaces Py2-only ``xrange`` (identical behavior on 2 and 3).
    uniq_dict = {}
    codes = np.zeros(len(X))
    for i, label in enumerate(X):
        if label not in uniq_dict:
            # First time this label is seen: assign the next code.
            uniq_dict[label] = len(uniq_dict)
        codes[i] = uniq_dict[label]
    return codes
def repanel_cov(groups, sigmas):
    '''calculate error covariance matrix for random effects model

    Parameters
    ----------
    groups : array, (nobs, nre) or (nobs,)
        array of group/category observations
    sigmas : array, (nre+1,)
        standard deviations of the random effects; the last element is the
        standard deviation of the idiosyncratic error
        (NOTE(review): group effects enter squared via the dummy outer
        product while the idiosyncratic term enters unsquared -- consistent
        only when sigmas[-1] == 1; confirm intended scaling)

    Returns
    -------
    omega : array, (nobs, nobs)
        covariance matrix of error
    omegainv : array, (nobs, nobs)
        inverse covariance matrix of error
    omegainvhalf : array, (nobs, nobs)
        matrix square root of the inverse, i.e.
        omegainv = omegainvhalf.dot(omegainvhalf.T)

    Notes
    -----
    Dense implementation: builds full nobs-by-nobs matrices, and
    omegainvhalf is not sparse either.
    '''
    # Promote a 1-d group vector to a single-column 2-d array.
    if groups.ndim == 1:
        groups = groups[:, None]
    nobs, nre = groups.shape

    # Start from the idiosyncratic component on the diagonal.
    omega = sigmas[-1] * np.eye(nobs)
    for k in range(nre):
        col = groups[:, k:k + 1]
        levels = np.unique(col)
        # Scaled membership dummies for this random effect.
        scaled = sigmas[k] * (col == levels).astype(float)
        omega = omega + np.dot(scaled, scaled.T)

    # Symmetric eigendecomposition (eigh, since omega is symmetric PSD).
    ev, evec = np.linalg.eigh(omega)
    omegainv = np.dot(evec, (1. / ev * evec).T)
    omegainvhalf = evec / np.sqrt(ev)
    return omega, omegainv, omegainvhalf
class PanelData(LongPanel):
    """Placeholder data container based on pandas' legacy LongPanel.

    Adds no behavior yet; reserved for panel-specific data handling.
    """
    pass
class PanelModel(object):
    """
    An abstract statistical model class for panel (longitudinal) datasets.

    Parameters
    ----------
    endog : array-like or str
        If a pandas object is used then endog should be the name of the
        endogenous variable as a string.
    exog : array-like
        Regressor matrix; a zero-variance (constant) column is detected
        automatically and labeled 'cons'.
    panel : array-like
        Panel (cross-sectional unit) identifier per observation.
    time : array-like
        Time identifier per observation.
    xtnames : sequence of str
        (panel_name, time_name) labels for the two index dimensions.
    equation : str
        Space-separated variable names, endogenous variable first.  The
        names are only used for labeling, not for selecting columns.
    panel_data : pandas.LongPanel object, optional

    Notes
    -----
    If a pandas object is supplied it is assumed that the major_axis is time
    and that the minor_axis has the panel variable.
    """
    def __init__(self, endog=None, exog=None, panel=None, time=None,
            xtnames=None, equation=None, panel_data=None):
        # Fixed: identity comparison (is None) instead of == None.
        if panel_data is None:
#            if endog == None and exog == None and panel == None and \
#                    time == None:
#                raise ValueError("If pandel_data is False then endog, exog, \
#panel_arr, and time_arr cannot be None.")
            self.initialize(endog, exog, panel, time, xtnames, equation)
#        elif aspandas != False:
#            if not isinstance(endog, str):
#                raise ValueError("If a pandas object is supplied then endog \
#must be a string containing the name of the endogenous variable")
#            if not isinstance(aspandas, LongPanel):
#                raise ValueError("Only pandas.LongPanel objects are supported")
#            self.initialize_pandas(endog, aspandas, panel_name)

    def initialize(self, endog, exog, panel, time, xtnames, equation):
        """
        Initialize plain array model.

        Parses `equation` for variable labels, detects the constant column,
        and stores the data arrays plus the unique panel/time values.

        See PanelModel
        """
#TODO: for now, we are going assume a constant, and then make the first
#panel the base, add a flag for this....

        # get names
        names = equation.split(" ")
        self.endog_name = names[0]
        exog_names = names[1:]  # this makes the order matter in the array
        self.panel_name = xtnames[0]
        self.time_name = xtnames[1]

        # A zero-variance column is taken to be the constant; its label is
        # spliced into the name list at the matching position.
        novar = exog.var(0) == 0
        if True in novar:
            cons_index = np.where(novar == 1)[0][0]  # constant col. num
            exog_names.insert(cons_index, 'cons')

        self._cons_index = novar  # boolean mask; used again in _fit_fixed
        self.exog_names = exog_names
        self.endog = np.squeeze(np.asarray(endog))
        exog = np.asarray(exog)
        self.exog = exog
        self.panel = np.asarray(panel)
        self.time = np.asarray(time)

        self.paneluniq = np.unique(panel)
        self.timeuniq = np.unique(time)
#TODO: this structure can possibly be extracted somewhat to deal with
#names in general

#TODO: add some dimension checks, etc.

#    def initialize_pandas(self, endog, aspandas):
#        """
#        Initialize pandas objects.
#
#        See PanelModel.
#        """
#        self.aspandas = aspandas
#        endog = aspandas[endog].values
#        self.endog = np.squeeze(endog)
#        exog_name = aspandas.columns.tolist()
#        exog_name.remove(endog)
#        self.exog = aspandas.filterItems(exog_name).values
#TODO: can the above be simplified to slice notation?
#        if panel_name != None:
#            self.panel_name = panel_name
#        self.exog_name = exog_name
#        self.endog_name = endog
#        self.time_arr = aspandas.major_axis
        #TODO: is time always handled correctly in fromRecords?
#        self.panel_arr = aspandas.minor_axis
#TODO: all of this might need to be refactored to explicitly rely (internally)
# on the pandas LongPanel structure for speed and convenience.
# not sure this part is finished...

#TODO: doesn't conform to new initialize
    def initialize_pandas(self, panel_data, endog_name, exog_name):
        """Initialize from a pandas LongPanel.

        If `exog_name` is None, every column except `endog_name` is used
        as a regressor.
        """
        self.panel_data = panel_data
        endog = panel_data[endog_name].values  # does this create a copy?
        self.endog = np.squeeze(endog)
        # Fixed: identity comparison (is None) instead of == None.
        if exog_name is None:
            exog_name = panel_data.columns.tolist()
            exog_name.remove(endog_name)
        self.exog = panel_data.filterItems(exog_name).values  # copy?
        self._exog_name = exog_name
        self._endog_name = endog_name
        self._timeseries = panel_data.major_axis  # might not need these
        self._panelseries = panel_data.minor_axis

#TODO: this could be pulled out and just have a by kwd that takes
# the panel or time array
#TODO: this also needs to be expanded for 'twoway'
    def _group_mean(self, X, index='oneway', counts=False, dummies=False):
        """
        Get group means of X by time or by panel.

        Parameters
        ----------
        X : ndarray
            Data to average, 1-d or 2-d (observations in rows).
        index : str
            'oneway' averages within panels, 'time' within time periods.
        counts : bool
            If True, also return the per-group observation counts.
        dummies : bool
            If True, also return the group membership dummy matrix.

        Returns
        -------
        mean, optionally followed by counts and/or the dummy matrix,
        depending on the flags.
        """
        if index == 'oneway':
            Y = self.panel
            uniq = self.paneluniq
        elif index == 'time':
            Y = self.time
            uniq = self.timeuniq
        else:
            raise ValueError("index %s not understood" % index)

        #TODO: use sparse matrices
        # One row per group; dummy.sum(1) gives the group sizes.
        dummy = (Y == uniq[:, None]).astype(float)
        if X.ndim > 1:
            mean = np.dot(dummy, X) / dummy.sum(1)[:, None]
        else:
            mean = np.dot(dummy, X) / dummy.sum(1)
        if counts is False and dummies is False:
            return mean
        elif counts is True and dummies is False:
            return mean, dummy.sum(1)
        elif counts is True and dummies is True:
            return mean, dummy.sum(1), dummy
        elif counts is False and dummies is True:
            return mean, dummy

#TODO: Use kwd arguments or have fit_method methods?
    def fit(self, model=None, method=None, effects='oneway'):
        """
        method : LSDV, demeaned, MLE, GLS, BE, FE, optional
        model :
                between
                fixed
                random
                pooled
                [gmm]
        effects :
                oneway
                time
                twoway
        femethod : demeaned (only one implemented)
                   WLS
        remethod :
                swar -
                amemiya
                nerlove
                walhus

        Notes
        -----
        This is unfinished.  None of the method arguments work yet.
        Only oneway effects should work.
        """
        if method:  # get rid of this with default
            method = method.lower()
        model = model.lower()
        if method and method not in ["lsdv", "demeaned", "mle", "gls", "be",
                "fe"]:  # get rid of if method with default
            raise ValueError("%s not a valid method" % method)
#        if method == "lsdv":
#            self.fit_lsdv(model)
        if model == 'pooled':
            return GLS(self.endog, self.exog).fit()
        if model == 'between':
            return self._fit_btwn(method, effects)
        if model == 'fixed':
            return self._fit_fixed(method, effects)

#    def fit_lsdv(self, effects):
#        """
#        Fit using least squares dummy variables.
#
#        Notes
#        -----
#        Should only be used for small `nobs`.
#        """
#        pdummies = None
#        tdummies = None

    def _fit_btwn(self, method, effects):
        """Between estimator: regression on group (or time) means."""
        # group mean regression or WLS
        if effects != "twoway":
            endog = self._group_mean(self.endog, index=effects)
            exog = self._group_mean(self.exog, index=effects)
        else:
            # Fixed: the message previously interpolated an undefined name
            # ``s``, raising NameError instead of this ValueError.
            raise ValueError("%s effects is not valid for the between "
                             "estimator" % effects)
        befit = GLS(endog, exog).fit()
        return befit

    def _fit_fixed(self, method, effects):
        """Fixed-effects (within) estimator via demeaning."""
        endog = self.endog
        exog = self.exog
        demeantwice = False
        if effects in ["oneway", "twoways"]:
            if effects == "twoways":
                demeantwice = True
                effects = "oneway"
            # Demean within panels; counts lets us expand the means back
            # to observation length for unbalanced panels.
            endog_mean, counts = self._group_mean(endog, index=effects,
                counts=True)
            exog_mean = self._group_mean(exog, index=effects)
            counts = counts.astype(int)
            endog = endog - np.repeat(endog_mean, counts)
            exog = exog - np.repeat(exog_mean, counts, axis=0)
        if demeantwice or effects == "time":
            endog_mean, dummies = self._group_mean(endog, index="time",
                dummies=True)
            exog_mean = self._group_mean(exog, index="time")
            # This allows unbalanced panels
            endog = endog - np.dot(endog_mean, dummies)
            exog = exog - np.dot(dummies.T, exog_mean)
        # NOTE(review): unary minus on the boolean mask relies on legacy
        # NumPy semantics (logical not); modern NumPy raises here -- confirm.
        fefit = GLS(endog, exog[:, -self._cons_index]).fit()
        #TODO: might fail with one regressor
        return fefit
class SURPanel(PanelModel):
    """Placeholder for a SUR (seemingly unrelated regressions) panel
    estimator; not implemented."""
    pass
class SEMPanel(PanelModel):
    """Placeholder for an SEM panel estimator; not implemented."""
    pass
class DynamicPanel(PanelModel):
    """Placeholder for a dynamic panel estimator; not implemented."""
    pass
if __name__ == "__main__":
    # Demo/scratch script: requires the legacy scikits.statsmodels package
    # and a very old pandas with LongPanel (Python 2 era code).
    try:
        import pandas
        # NOTE(review): comparing the `pandas.version` module object to .1
        # is effectively a no-op version check -- confirm intent.
        pandas.version >= .1
    except:
        raise ImportError("pandas >= .10 not installed")
    from pandas import LongPanel
    import scikits.statsmodels.api as sm
    import numpy.lib.recfunctions as nprf

    data = sm.datasets.grunfeld.load()
    # Baltagi doesn't include American Steel
    endog = data.endog[:-20]
    fullexog = data.exog[:-20]
#    fullexog.sort(order=['firm','year'])
    panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
            usemask=False)
    panel_panda = LongPanel.fromRecords(panel_arr, major_field='year',
            minor_field='firm')

    # the most cumbersome way of doing it as far as preprocessing by hand
    exog = fullexog[['value', 'capital']].view(float).reshape(-1, 2)
    exog = sm.add_constant(exog)
    panel = group(fullexog['firm'])
    year = fullexog['year']
    panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm', 'year'],
            equation='invest value capital')
    # note that equation doesn't actually do anything but name the variables
    panel_ols = panel_mod.fit(model='pooled')

    panel_be = panel_mod.fit(model='between', effects='oneway')
    panel_fe = panel_mod.fit(model='fixed', effects='oneway')

    panel_bet = panel_mod.fit(model='between', effects='time')
    panel_fet = panel_mod.fit(model='fixed', effects='time')

    panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')

#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS MODEL p.35
#for explicit formulas for spectral decomposition
#but this works also for unbalanced panel
#
#I also just saw: 9.4.2 The Random Effects Model p.176 which is
#partially almost the same as I did
#
#this needs to use sparse matrices for larger datasets
#
#"""
#
#import numpy as np
#

    # Hand-built random-effects covariance example, checked below against
    # repanel_cov().
    groups = np.array([0, 0, 0, 1, 1, 2, 2, 2])
    nobs = groups.shape[0]
    groupuniq = np.unique(groups)
    periods = np.array([0, 1, 2, 1, 2, 0, 1, 2])
    perioduniq = np.unique(periods)

    # Group and period membership dummies.
    dummygr = (groups[:, None] == groupuniq).astype(float)
    dummype = (periods[:, None] == perioduniq).astype(float)

    sigma = 1.
    sigmagr = np.sqrt(2.)
    sigmape = np.sqrt(3.)
    #dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
    #                 sigmape*dummype]
    #exclude constant ?
    dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
    # omega is the error variance-covariance matrix for the stacked
    # observations
    omega = np.dot(dummyall, dummyall.T) + sigma * np.eye(nobs)
    print omega
    print np.linalg.cholesky(omega)
    ev, evec = np.linalg.eigh(omega)  #eig doesn't work
    omegainv = np.dot(evec, (1/ev * evec).T)
    omegainv2 = np.linalg.inv(omega)
    omegacomp = np.dot(evec, (ev * evec).T)
    # Reconstruction error of the eigendecomposition (should be ~0).
    print np.max(np.abs(omegacomp - omega))
    #check
    #print np.dot(omegainv,omega)
    print np.max(np.abs(np.dot(omegainv, omega) - np.eye(nobs)))
    omegainvhalf = evec/np.sqrt(ev)  #not sure whether ev shouldn't be column
    print np.max(np.abs(np.dot(omegainvhalf, omegainvhalf.T) - omegainv))

    # now we can use omegainvhalf in GLS (instead of the cholesky)

    # Cross-check the hand-built omega against repanel_cov (sigma == 1 so
    # the squared/unsquared idiosyncratic scaling coincides here).
    sigmas2 = np.array([sigmagr, sigmape, sigma])
    groups2 = np.column_stack((groups, periods))
    omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
    print np.max(np.abs(omega_ - omega))
    print np.max(np.abs(omegainv_ - omegainv))
    print np.max(np.abs(omegainvhalf_ - omegainvhalf))

    # notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
    # Pgr projects onto group means; Qgr is the within (demeaning) projector.
    Pgr = reduce(np.dot, [dummygr,
        np.linalg.inv(np.dot(dummygr.T, dummygr)), dummygr.T])
    Qgr = np.eye(nobs) - Pgr
    # within group effect: np.dot(Qgr, groups)
    # but this is not memory efficient, compared to groupstats
    print np.max(np.abs(np.dot(Qgr, groups)))
| {
"content_hash": "90e0d744170b7421682d54ebc7c4e3d0",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 80,
"avg_line_length": 33.13288288288288,
"alnum_prop": 0.5990075453742097,
"repo_name": "wesm/statsmodels",
"id": "22766de7fb51f52e27d8b618bb2528982c15c87b",
"size": "14711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scikits/statsmodels/sandbox/panelmod.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "Python",
"bytes": "3470843"
},
{
"name": "R",
"bytes": "2168"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Package metadata assembled up front, then handed to setuptools in a
# single call; values are identical to the previous inline invocation.
PACKAGE_INFO = dict(
    name='apikit',
    version='0.4',
    url='http://github.com/pudo/apikit',
    license='MIT',
    author='Friedrich Lindenberg',
    author_email='friedrich@pudo.org',
    description='A set of utility functions for RESTful Flask applications.',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    namespace_packages=[],
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'Flask', 'six'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)

setup(**PACKAGE_INFO)
| {
"content_hash": "6443f2ca831cb4ed988fa5f12a06644e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 34.515151515151516,
"alnum_prop": 0.6084284460052678,
"repo_name": "pudo/apikit",
"id": "ff20bfc54875dccb7707619cfde65ab8d1f7c65e",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10958"
}
],
"symlink_target": ""
} |
class Objeto:
    """Represent an object stored in the database.

    Each instance references one object in the database, which stores a
    set of variables that may have heterogeneous attribute types.
    (Docstring translated from the original Spanish.)

    Attributes
    ----------
    name : str
        Display name of the object.
    variables : object or None
        Container of the object's stored variables; None until assigned.
    clase : int
        Class/category code of the object.
    """

    def __init__(self, name=""):
        # Plain attributes replace the previous pass-through @property
        # boilerplate (getters/setters with no logic); read and write
        # semantics for callers are unchanged.
        self.name = name
        self.variables = None
        self.clase = 0
| {
"content_hash": "2dcb2008134ea865c211b4abd4d2f9ad",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 22.676470588235293,
"alnum_prop": 0.6108949416342413,
"repo_name": "cabaag/serp",
"id": "9b798d00dba5daa555062c3e8792017912e6e558",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serp/app/core/Objeto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "25559"
},
{
"name": "Python",
"bytes": "28108"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add image_size_cap, site_url and
    thumbnail_size settings to the SiteConfig model."""

    dependencies = [
        # Must run after the migration that created SiteConfig.
        ('frog', '0013_siteconfig'),
    ]

    operations = [
        migrations.AddField(
            model_name='siteconfig',
            name='image_size_cap',
            # NOTE(review): default 5120 -- presumably a size cap in KB
            # (5 MB); confirm the unit against the upload handling code.
            field=models.SmallIntegerField(default=5120),
        ),
        migrations.AddField(
            model_name='siteconfig',
            name='site_url',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='siteconfig',
            name='thumbnail_size',
            # NOTE(review): presumably the thumbnail edge length in pixels.
            field=models.SmallIntegerField(default=256),
        ),
    ]
| {
"content_hash": "e1711c9b047f6fc2bb77aeb206de3833",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 57,
"avg_line_length": 25.214285714285715,
"alnum_prop": 0.5637393767705382,
"repo_name": "theiviaxx/Frog",
"id": "be9b9b51d5d3c7eb7347e30ace766492d41dcc93",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frog/migrations/0014_auto_20190818_1628.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22225"
},
{
"name": "JavaScript",
"bytes": "57292"
},
{
"name": "Python",
"bytes": "215494"
}
],
"symlink_target": ""
} |
import getopt
import sys
import time
from threading import Thread

import requests
def banner():
    """Print the ForzaBruta startup banner.

    Fixed: Python 2 ``print`` statements converted to ``print()`` calls;
    output is byte-identical under Python 2 and the module now also parses
    under Python 3.
    """
    print("\n***************************************")
    print("*                         ForzaBruta 0.1*")
    print("***************************************")
def usage():
    """Print command-line usage help and an example invocation.

    Fixed: Python 2 ``print`` statements converted to ``print()`` calls;
    output is unchanged and the module now also parses under Python 3.
    """
    print("Usage:")
    print(" -w: url (http://somesite.com/FUZZ)")
    print(" -t: threads")
    print(" -f: dictionary file\n")
    print("example: forzabruta.py -w http://www.targetsite.com/FUZZ -t 5 -f common.txt\n")
class request_performer(Thread):
    """Worker thread that requests one candidate URL and prints the result.

    The shared list ``i`` (owned by ``launcher_thread``) holds the count of
    in-flight workers; each worker frees its slot when it finishes.
    """

    def __init__(self, word, url):
        Thread.__init__(self)
        try:
            # Keep only the text before the first newline (readlines()
            # leaves the trailing newline on each word).
            self.word = word.split("\n")[0]
            self.url = url.replace('FUZZ', self.word)
        except Exception as e:  # fixed: Py2-only "except X, e" syntax
            print(e)

    def run(self):
        try:
            r = requests.get(self.url)
            print(self.url + " - " + str(r.status_code))
        except Exception as e:
            print(e)
        finally:
            # Fixed: the slot was only freed on success, so any failed
            # request permanently consumed a thread slot and could stall
            # the launcher once `threads` requests had errored.
            i[0] = i[0] - 1
def start(argv):
    """Parse command-line options and launch the brute-force run.

    Options: -w target URL containing FUZZ, -f wordlist path, -t thread
    count.  Exits via sys.exit() on bad arguments or unreadable wordlist.
    """
    banner()
    if len(sys.argv) < 5:
        usage()
        sys.exit()
    try:
        opts, args = getopt.getopt(argv, "w:f:t:")
    except getopt.GetoptError:
        print("Error en arguments")
        sys.exit()
    url = None
    wordlist_path = None  # renamed: previously shadowed the builtin `dict`
    threads = None
    for opt, arg in opts:
        if opt == '-w':
            url = arg
        elif opt == '-f':
            wordlist_path = arg
        elif opt == '-t':
            # Fixed: the value stayed a string, so the downstream thread
            # cap comparison never limited concurrency.
            threads = int(arg)
    if url is None or wordlist_path is None or threads is None:
        # Fixed: missing options previously crashed with NameError.
        usage()
        sys.exit()
    try:
        f = open(wordlist_path, "r")
        words = f.readlines()
        f.close()  # fixed: file handle was never closed
    except IOError:
        print("Failed opening file: " + wordlist_path + "\n")
        sys.exit()
    launcher_thread(words, threads, url)
def launcher_thread(names, th, url):
    """Launch one request_performer per word, keeping at most `th` alive.

    Parameters
    ----------
    names : list of str
        Candidate words (consumed destructively from the front).
    th : int or str
        Maximum number of concurrent worker threads.
    url : str
        Target URL containing the FUZZ placeholder.
    """
    global i
    th = int(th)  # tolerate a string count from older callers
    i = [0]  # shared in-flight counter, decremented by each worker
    workers = []
    while len(names):
        try:
            if i[0] < th:
                word = names.pop(0)
                i[0] = i[0] + 1
                worker = request_performer(word, url)
                worker.start()
                workers.append(worker)
            else:
                # Fixed: previously a hot busy-wait spun at 100% CPU while
                # waiting for a slot to free up.
                time.sleep(0.01)
        except KeyboardInterrupt:
            print("ForzaBruta interrupted by user. Finishing attack..")
            sys.exit()
    # Fixed: only the most recently launched thread was joined, so the
    # function could return (and the process exit) with workers running.
    for worker in workers:
        worker.join()
    return
if __name__ == "__main__":
    # Script entry point: forward CLI args (minus the program name).
    # Fixed: Python 2 print statement converted to a print() call so the
    # module also parses under Python 3; output is unchanged.
    try:
        start(sys.argv[1:])
    except KeyboardInterrupt:
        print("ForzaBruta interrupted by user, killing all threads..!!")
"content_hash": "fb38b10ee14e6f591bfa45e1785b2f23",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 89,
"avg_line_length": 26.406976744186046,
"alnum_prop": 0.48701012769704977,
"repo_name": "abhishekjiitr/network-security",
"id": "5d133419b45ffde36b67a8d25cb18489460176bb",
"size": "2271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directory brute forcing/forzabruta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5115"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
} |
from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport
from datadog_checks.base.checks.windows.perf_counters.counter import PerfObject
from datadog_checks.base.checks.windows.perf_counters.transform import NATIVE_TRANSFORMERS
from .metrics import METRICS_CONFIG
class ExchangeCheckV2(PerfCountersBaseCheckWithLegacySupport):
    """Perf-counter based Exchange check, namespaced under `exchange`."""

    __NAMESPACE__ = 'exchange'

    # Perf objects that must also emit legacy aggregate metrics, keyed by
    # object name; each value maps a counter name to its legacy suffix.
    LEGACY_AGGREGATES = {
        'Processor': {
            '% Processor Time': 'cpu_time',
            '% User Time': 'cpu_user',
            '% Privileged Time': 'cpu_privileged',
        },
        'MSExchange Active Manager': {
            'Database Mounted': 'database_mounted',
        },
        'Web Service': {
            'Current Connections': 'current_connections_total',
            'Connection Attempts/sec': 'connection_attempts',
            'Other Request Methods/sec': 'other_attempts',
        },
    }

    def get_default_config(self):
        """Return the default metric configuration for the check."""
        return {'metrics': METRICS_CONFIG}

    def get_perf_object(self, connection, object_name, object_config, use_localized_counters, tags):
        """Build the PerfObject for `object_name`, wrapping objects that
        need legacy aggregates in a CompatibilityPerfObject."""
        aggregate_names = self.LEGACY_AGGREGATES.get(object_name)
        if aggregate_names is None:
            return super().get_perf_object(
                connection, object_name, object_config, use_localized_counters, tags
            )
        return CompatibilityPerfObject(
            self,
            connection,
            object_name,
            object_config,
            use_localized_counters,
            tags,
            aggregate_names,
        )
class CompatibilityPerfObject(PerfObject):
    """PerfObject that additionally publishes selected counters under
    legacy aggregate metric names.

    `aggregate_names` maps a counter name to the legacy metric suffix it
    should also be reported as (prefixed with the object's metric prefix).
    """

    def __init__(
        self,
        check,
        connection,
        object_name,
        object_config,
        use_localized_counters,
        tags,
        aggregate_names,
    ):
        super().__init__(check, connection, object_name, object_config, use_localized_counters, tags)
        self._aggregate_names = aggregate_names

    def _configure_counters(self, available_counters, available_instances):
        super()._configure_counters(available_counters, available_instances)

        for counter in self.counters:
            legacy_suffix = self._aggregate_names.get(counter.name)
            if legacy_suffix is None:
                continue
            # Attach a transformer that also emits the legacy metric name.
            make_transformer = NATIVE_TRANSFORMERS[counter.metric_type]
            counter.aggregate_transformer = make_transformer(
                self.check, '{}.{}'.format(self.metric_prefix, legacy_suffix), {}
            )
| {
"content_hash": "ac65ef4c6c1c0096e843667278d0ddec",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 115,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.5809996455157745,
"repo_name": "DataDog/integrations-core",
"id": "4dfc18b9572c1c96bf9b7ee69b281b16456b1aa6",
"size": "2936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exchange_server/datadog_checks/exchange_server/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.