| code (string, lengths 2-1.05M) | repo_name (string, lengths 5-104) | path (string, lengths 4-251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# Xlib.__init__ -- glue for Xlib package
#
# Copyright (C) 2000-2002 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
__version__ = (0, 31)
__version_extra__ = ''
__version_string__ = '.'.join(map(str, __version__)) + __version_extra__
__all__ = [
'X',
'XK',
'Xatom',
'Xcursorfont',
'Xutil',
'display',
'error',
'rdb',
# Explicitly exclude threaded, so that it isn't imported by
# from Xlib import *
]
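# Added note: with the values above, __version_string__ evaluates to "0.31".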
| python-xlib/python-xlib | Xlib/__init__.py | Python | lgpl-2.1 | 1,184 |
from scrapy.spiders import Spider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.conf import settings
from beerindex.items import BeerindexItem
import logging
import lxml.html
from urlparse import urlparse
import re
class BeerSpider(Spider):
name = "beerspider"
beer_sites = {
'www.wbeer.com.br':
{
"start_url" : 'https://www.wbeer.com.br/browse.ep?cID=103354',
"next_link" : '.paginacao li.prox a::attr(href)',
"product_link" : '.catalogo-lista .lista .informacoes a::attr("href")',
"xpath_title" : "//span[@itemprop='name']//text()",
"xpath_price" : "//div[@class='preco-por']//text()",
"xpath_style" : "//div[@class='resumo']//span[@class='nome-tipo']//text()"
},
'www.emporioveredas.com.br' : {
"start_url" : 'http://www.emporioveredas.com.br/cervejas-importadas.html',
"next_link" : '.pager a.next::attr(href)',
"product_link" : '.products-grid a.product-image ::attr("href")',
"xpath_title" : "//h1[@itemprop='name']//text()",
"xpath_price" : "//div[@class='product-shop']//span[@itemprop='price']//text()",
"xpath_style" : "//table[@id='product-attribute-specs-table']//tr[contains(.,'Estilo')]//td[last()]//text()"
},
'www.mundodascervejas.com' : {
"start_url" : 'http://www.mundodascervejas.com/buscar?q=cerveja',
"next_link" : '.topo .pagination a[rel="next"]::attr("href")',
"product_link" : '#listagemProdutos a.produto-sobrepor::attr("href")',
"xpath_title" : "//h1[@itemprop='name']//text()",
"xpath_price" : "//div[@class='principal']//div[contains(@class,'preco-produto')]//strong[contains(@class,'preco-promocional')]//text()",
"xpath_style" : "//div[@id='descricao']//table//tr[contains(.,'Estilo')]//td[last()]//text()"
},
'www.clubeer.com.br': {
"start_url" : 'http://www.clubeer.com.br/loja',
"next_link" : '#pagination li.current + li a::attr("href")',
"product_link" : '.minhascervejas li .areaborder > a:first-child::attr("href")',
"xpath_title" : "//h1[@itemprop='name']//text()",
"xpath_price" : "//div[@id='principal']//div[contains(@class,'areaprecos')]//span[@itemprop='price']//text()",
"xpath_style" : "//div[contains(@class,'areaprodutoinfoscontent')]//ul[contains(.,'ESTILO')]//li[position()=2]//text()"
},
'www.clubedomalte.com.br': {
"start_url" : 'http://www.clubedomalte.com.br/pais',
"next_link" : '.paginacao li.pg:last-child a::attr("href")',
"product_link" : '.mainBar .spotContent > a:first-child::attr("href")',
"xpath_title" : "//h1[@itemprop='name']//text()",
"xpath_price" : "//div[contains(@class,'interna')]//div[contains(@class,'preco')]//*[@itemprop='price']//text()",
"xpath_style" : "//div[contains(@class,'areaprodutoinfoscontent')]//ul[contains(.,'ESTILO')]//li[position()=2]//text()"
}
}
def domain_from_url(self,url):
parsed = urlparse(url)
return parsed.netloc
#allowed_domains = ["www.cervejastore.com.br"]
# start_urls = ['http://www.mundodascervejas.com/buscar?q=cerveja']
# start_urls = ["http://www.emporioveredas.com.br/cervejas-importadas.html"]
start_urls = [beer_sites[store]["start_url"] for store in beer_sites]
def parse(self,response):
domain = self.domain_from_url(response.url)
for url in response.css(self.beer_sites[domain]["next_link"]).extract():
request = Request(response.urljoin(url.strip()), self.parse)
yield request
titles = response.css(self.beer_sites[domain]["product_link"]).extract()
for title in titles:
yield Request(response.urljoin(title), self.parse_product)
def parse_product(self,response):
domain = self.domain_from_url(response.url)
item = BeerindexItem()
item["name"] = response.xpath(self.beer_sites[domain]["xpath_title"]).extract_first()
item["style"] = response.xpath(self.beer_sites[domain]["xpath_style"]).extract_first()
item["link"] = response.url
item["price"] = "".join(response.xpath(self.beer_sites[domain]["xpath_price"]).extract())
item["price"] = re.sub(r"\s+", "", item["price"], flags=re.UNICODE)
item["price"] = re.sub(r"[^\d,\.+]", "", item["price"], flags=re.UNICODE)
item["price"] = re.sub(r",", ".", item["price"], flags=re.UNICODE)
yield item
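# Illustrative sketch (added; not part of the original spider): how the price
# clean-up in parse_product behaves on a hypothetical scraped value.
def _example_price_cleanup(raw="R$ 19,90 "):
    price = re.sub(r"\s+", "", raw, flags=re.UNICODE)           # strip whitespace -> "R$19,90"
    price = re.sub(r"[^\d,\.+]", "", price, flags=re.UNICODE)   # keep digits/commas/dots -> "19,90"
    return re.sub(r",", ".", price, flags=re.UNICODE)           # decimal comma to dot -> "19.90"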
| asiviero/brbeerindex | beerindex/spiders/beerspider.py | Python | lgpl-2.1 | 4,667 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import define
import tw8836
import spi
def quad_check():
status = spi.status1_read()
if (status & 0x40):
print 'SPI flash is already in QUAD mode'
return define.TRUE
else:
print 'SPI flash is not in QUAD mode yet'
return define.FALSE
def quad_enable():
status = spi.status1_read()
spi.write_enable()
spi.status1_write(status | 0x40)
spi.write_disable()
def quad_disable():
status = spi.status1_read()
spi.write_enable()
spi.status1_write(status & ~0x40)
spi.write_disable()
def four_byte_check():
status = spi.status2_read()
if (status & 0x20):
if define.DEBUG == define.ON:
print 'SPI flash is in 4 Byte mode'
spi.bank_address_register_write(0x80)
return define.TRUE
else:
if define.DEBUG == define.ON:
print 'SPI flash is not in 4 Byte mode'
spi.bank_address_register_write(0x0)
return define.FALSE
def four_byte_enter():
tw8836.write_page(0x04)
tw8836.write(0xF3, (spi.DMA_DEST_CHIPREG << 6) + spi.DMA_CMD_COUNT_1)
tw8836.write(0xF5, 0) #length high
tw8836.write(0xF8, 0) #length middle
tw8836.write(0xF9, 0) #length low
tw8836.write(0xFA, spi.SPICMD_EN4B)
tw8836.write(0xF4, spi.SPI_CMD_OPT_NONE | spi.DMA_START)
def four_byte_exit():
tw8836.write_page(0x04)
tw8836.write(0xF3, (spi.DMA_DEST_CHIPREG << 6) + spi.DMA_CMD_COUNT_1)
tw8836.write(0xF5, 0) #length high
tw8836.write(0xF8, 0) #length middle
tw8836.write(0xF9, 0) #length low
tw8836.write(0xFA, spi.SPICMD_EX4B)
tw8836.write(0xF4, spi.SPI_CMD_OPT_NONE | spi.DMA_START)
def erase_fail_check():
status = spi.security_register_read()
if (status & 0x40):
print 'erase failed'
spi.sr_clear()
return define.TRUE
else:
print 'erase succeed'
return define.FALSE
def dummy_cycles_config(mode, cycles):
print 'dummy_cycles_config in issi.py'
status2_register = spi.status2_read()
print hex(status2_register)
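# Illustrative usage sketch (added; assumes the spi/tw8836 helpers above are already
# initialized by the caller): switch the flash into QUAD mode only when needed.
def ensure_quad_mode():
    if quad_check() == define.FALSE:
        quad_enable()
    return quad_check()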
| lgnq/RPI8836 | issi.py | Python | lgpl-3.0 | 2,227 |
from .ao_integrals import AOIntegrals, ao2mo
from .mo_integrals import FCIDUMP
from .mo_integrals import load as load_fcidump
from .fcidump_generator import generate_fcidump
| orbkit/orbkit | orbkit/libcint_interface/__init__.py | Python | lgpl-3.0 | 174 |
import time
import requests
from collectors.lib import utils
from collectors.lib.collectorbase import CollectorBase
# reference: https://hadoop.apache.org/docs/r2.7.2/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredAppMasterRest.html
REST_API = {"YARN_APPS_PATH": "ws/v1/cluster/apps",
"MAPREDUCE_JOBS_PATH": "ws/v1/mapreduce/jobs"}
# response from 'ws/v1/mapreduce/jobs'
# {
# "jobs": {
# "job": [
# {
# "startTime": 1453761316277,
# "finishTime": 0,
# "elapsedTime": 99221829,
# "id": "job_1453738555560_0001",
# "name": "WordCount",
# "user": "vagrant",
# "state": "RUNNING",
# "mapsTotal": 1,
# "mapsCompleted": 0,
# "reducesTotal": 1,
# "reducesCompleted": 0,
# "mapProgress": 48.335266,
# "reduceProgress": 0.0,
# "mapsPending": 0,
# "mapsRunning": 1,
# "reducesPending": 1,
# "reducesRunning": 0,
# "uberized": false,
# "diagnostics": "",
# "newReduceAttempts": 1,
# "runningReduceAttempts": 0,
# "failedReduceAttempts": 0,
# "killedReduceAttempts": 0,
# "successfulReduceAttempts": 0,
# "newMapAttempts": 0,
# "runningMapAttempts": 1,
# "failedMapAttempts": 1,
# "killedMapAttempts": 0,
# "successfulMapAttempts": 0
# }
# ]
# }
# }
JOB = ['elapsedTime', 'mapsTotal', 'mapsCompleted', 'reducesTotal', 'reducesCompleted', 'mapsPending', 'mapsRunning', 'reducesPending', 'reducesRunning', 'newReduceAttempts', 'runningReduceAttempts',
'failedReduceAttempts', 'killedReduceAttempts', 'successfulReduceAttempts', 'newMapAttempts', 'runningMapAttempts', 'failedMapAttempts', 'killedMapAttempts', 'successfulMapAttempts']
# from 'http://localhost:8088/proxy/application_1453738555560_0001/ws/v1/mapreduce/jobs/application_1453738555560_0001/counters'
# {
# "jobCounters": {
# "id": "job_1453738555560_0001",
# "counterGroup": [
# {
# "counterGroupName": "org.apache.hadoop.mapreduce.FileSystemCounter",
# "counter": [
# {
# "name": "FILE_BYTES_READ",
# "totalCounterValue": 0,
# "mapCounterValue": 1,
# "reduceCounterValue": 2
# },
# {
# "name": "FILE_BYTES_WRITTEN",
# "totalCounterValue": 3,
# "mapCounterValue": 4,
# "reduceCounterValue": 5
# }
# ]
# }
# ]
# }
# }
JOB_COUNTER = ['reduceCounterValue', 'mapCounterValue', 'totalCounterValue']
# from 'http://localhost:8088/proxy/application_1453738555560_0001/ws/v1/mapreduce/jobs/application_1453738555560_0001/tasks'
# {
# "tasks": {
# "task": [
# {
# "startTime": 1453761318527,
# "finishTime": 0,
# "elapsedTime": 99869037,
# "progress": 49.11076,
# "id": "task_1453738555560_0001_m_000000",
# "state": "RUNNING",
# "type": "MAP",
# "successfulAttempt": "",
# "status": "map > map"
# }
# ]
# }
# }
class MapReduce(CollectorBase):
def __init__(self, config, logger, readq):
super(MapReduce, self).__init__(config, logger, readq)
self.port = self.get_config('port', 8080)
self.host = self.get_config('host', "localhost")
self.http_prefix = 'http://%s:%s' % (self.host, self.port)
def __call__(self):
try:
running_apps = self._get_running_app_ids()
running_jobs = self._mapreduce_job_metrics(running_apps)
self._mapreduce_job_counters_metrics(running_jobs)
self._mapreduce_task_metrics(running_jobs)
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '0'))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce metrics %s' % e)
def _get_running_app_ids(self):
try:
running_apps = {}
metrics_json = self.request("/%s?%s" % (REST_API['YARN_APPS_PATH'], "states=RUNNING&applicationTypes=MAPREDUCE"))
if metrics_json.get('apps'):
if metrics_json['apps'].get('app') is not None:
for app_json in metrics_json['apps']['app']:
app_id = app_json.get('id')
tracking_url = app_json.get('trackingUrl')
app_name = app_json.get('name')
if app_id and tracking_url and app_name:
running_apps[app_id] = (app_name, tracking_url)
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting yarn apps metric for mapreduce \n %s',e)
return running_apps
def _mapreduce_job_metrics(self, running_apps):
'''
Get metrics for each MapReduce job.
Return a dictionary for each MapReduce job
{
job_id: {
'job_name': job_name,
'app_name': app_name,
'user_name': user_name,
'tracking_url': tracking_url
}
'''
try:
running_jobs = {}
for app_id, (app_name, tracking_url) in running_apps.iteritems():
ts = time.time()
metrics_json = self.request_url("%s%s" % (tracking_url,REST_API['MAPREDUCE_JOBS_PATH']))
if metrics_json.get('jobs'):
if metrics_json['jobs'].get('job'):
for job_json in metrics_json['jobs']['job']:
job_id = job_json.get('id')
job_name = job_json.get('name')
user_name = job_json.get('user')
if job_id and job_name and user_name:
# Build the structure to hold the information for each job ID
running_jobs[str(job_id)] = {'job_name': str(job_name),
'app_name': str(app_name),
'user_name': str(user_name),
'tracking_url': "%s%s/%s" % (tracking_url, REST_API['MAPREDUCE_JOBS_PATH'], job_id)}
for metric in JOB:
self._readq.nput('mapreduce.job.%s %d %d app_name=%s user_name=%s job_name=%s' % (metric, ts, job_json[metric], utils.remove_invalid_characters(str(app_name)), utils.remove_invalid_characters(str(user_name)), utils.remove_invalid_characters(str(job_name))))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce jobs metric \n %s',e)
return running_jobs
def _mapreduce_job_counters_metrics(self, running_jobs):
'''
Get custom metrics specified for each counter
'''
try:
for job_id, job_metrics in running_jobs.iteritems():
ts = time.time()
job_name = job_metrics['job_name']
if job_name:
metrics_json = self.request_url("%s%s" % (job_metrics['tracking_url'],'/counters'))
if metrics_json.get('jobCounters'):
if metrics_json['jobCounters'].get('counterGroup'):
for counter_group in metrics_json['jobCounters']['counterGroup']:
group_name = counter_group.get('counterGroupName')
if group_name:
if counter_group.get('counter'):
for counter in counter_group['counter']:
counter_name = counter.get('name')
for metric in JOB_COUNTER:
self._readq.nput('mapreduce.job.counter.%s %d %d app_name=%s user_name=%s job_name=%s counter_name=%s' % (metric, ts, counter[metric], utils.remove_invalid_characters(job_metrics.get('app_name')), utils.remove_invalid_characters(job_metrics.get('user_name')), utils.remove_invalid_characters(job_name), utils.remove_invalid_characters(str(counter_name).lower())))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce jobs counter metric \n %s',e)
def _mapreduce_task_metrics(self, running_jobs):
'''
Get metrics for each MapReduce task
Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task
'''
try:
for job_id, job_stats in running_jobs.iteritems():
ts = time.time()
metrics_json = self.request_url("%s%s" % (job_stats['tracking_url'],'/tasks'))
if metrics_json.get('tasks'):
if metrics_json['tasks'].get('task'):
for task in metrics_json['tasks']['task']:
task_type = task.get('type')
if task_type:
if task_type == 'MAP':
self._readq.nput('mapreduce.job.map.task.progress %d %d app_name=%s user_name=%s job_name=%s task_type=%s' % (ts, task['progress'], utils.remove_invalid_characters(job_stats.get('app_name')), utils.remove_invalid_characters(job_stats.get('user_name')), utils.remove_invalid_characters(job_stats.get('job_name')), utils.remove_invalid_characters(str(task_type).lower())))
elif task_type == 'REDUCE':
self._readq.nput('mapreduce.job.reduce.task.progress %d %d app_name=%s user_name=%s job_name=%s task_type=%s' % (ts, task['progress'], utils.remove_invalid_characters(job_stats.get('app_name')), utils.remove_invalid_characters(job_stats.get('user_name')), utils.remove_invalid_characters(job_stats.get('job_name')), utils.remove_invalid_characters(str(task_type).lower())))
except Exception as e:
self._readq.nput("mapreduce.state %s %s" % (int(time.time()), '1'))
self.log_exception('exception collecting mapreduce task metric \n %s',e)
def request(self,uri):
resp = requests.get('%s%s' % (self.http_prefix, uri))
if resp.status_code != 200:
raise HTTPError('%s%s' % (self.http_prefix, uri))
return resp.json()
def request_url(self, url):
resp = requests.get(url)
if resp.status_code != 200:
if resp.status_code > 500:
self.log_exception("mapreduce collector can not access url : %s" % url)
raise HTTPError(url)
return resp.json()
class HTTPError(RuntimeError):
def __init__(self, resp):
RuntimeError.__init__(self, str(resp))
self.resp = resp
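# Illustrative sketch (added; a hypothetical helper, not used by the collector above):
# every metric is pushed to readq as a plain-text line of the form
# "<metric> <unix_ts> <value> tag1=v1 tag2=v2 ...", for example
# "mapreduce.job.mapsCompleted 1453761316 0 app_name=WordCount user_name=vagrant job_name=WordCount".
def _example_metric_line(metric, ts, value, **tags):
    return 'mapreduce.job.%s %d %d %s' % (
        metric, ts, value, ' '.join('%s=%s' % kv for kv in sorted(tags.items())))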
| wangy1931/tcollector | collectors/builtin/map_reduce.py | Python | lgpl-3.0 | 11,834 |
#------------------------------------------------------------------------------
# clob_string.py (Section 7.2)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()
print("Inserting data...")
cur.execute("truncate table testclobs")
longString = ""
for i in range(5):
char = chr(ord('A') + i)
longString += char * 250
cur.execute("insert into testclobs values (:1, :2)",
(i + 1, "String data " + longString + ' End of string'))
con.commit()
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.CLOB:
return cursor.var(cx_Oracle.LONG_STRING, arraysize = cursor.arraysize)
con.outputtypehandler = OutputTypeHandler
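# Added note: with this handler installed, CLOB columns are fetched directly as
# Python strings instead of cx_Oracle.LOB objects, so the query below needs no
# per-row LOB.read() call.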
print("Querying data...")
cur.prepare("select * from testclobs where id = :id")
cur.execute(None, {'id': 1})
(id, clobdata) = cur.fetchone()
print("CLOB length:", len(clobdata))
print("CLOB data:", clobdata)
| cmsdaq/hltd | lib/cx_Oracle-7.1/samples/tutorial/clob_string.py | Python | lgpl-3.0 | 1,342 |
from osp.conf.settings import *
# Unique key used for salting passwords
SECRET_KEY = 'Chac-8#haCa_Ra-e?-e+ucrur=gEFRasejayasaC?meMe!AC-a'
# DEBUG should be False in production, True in development
DEBUG = False
# List of administrators who should receive error reports
ADMINS = (
('John Smith', 'john.smith@example.edu'),
('Francis Drake', 'francis.drake@example.edu'),
)
MANAGERS = ADMINS
# List of developers who receive email messages in debug mode
DEBUG_USERS = ADMINS
# MySQL database configuration settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'osp',
'USER': 'osp',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Server time zone
TIME_ZONE = 'America/New_York'
# Used if you are hosting OSP off the top level (e.g. http://example.edu/osp/)
URL_PREFIX = ''
# The URL path at which media is being served
MEDIA_URL = URL_PREFIX + '/media/'
# The URL path at which admin media is being served
ADMIN_MEDIA_PREFIX = URL_PREFIX + '/media/admin/'
# Uncomment the following lines if you are using the LDAP backend
#
# import ldap
# from django_auth_ldap.config import LDAPSearch
#
# AUTHENTICATION_BACKENDS = [
# 'django_auth_ldap.backend.LDAPBackend',
# 'django.contrib.auth.backends.ModelBackend',
# ]
# AUTH_LDAP_SERVER_URI = 'ldap://ldap.example.edu'
# AUTH_LDAP_BIND_DN = 'service_user'
# AUTH_LDAP_BIND_PASSWORD = 'service_password'
# AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=Users,dc=example,dc=edu',
# ldap.SCOPE_SUBTREE,
# '(uid=%(user)s)')
# AUTH_LDAP_USER_ATTR_MAP = {
# 'first_name': 'givenName',
# 'last_name': 'sn',
# 'email': 'mail'
# }
# LOGIN_REDIRECT_URL = URL_PREFIX + '/'
# Uncomment the following lines if you are using the CAS backend
#
# AUTHENTICATION_BACKENDS = [
# 'django.contrib.auth.backends.ModelBackend',
# 'django_cas.backends.CASBackend',
# ]
# MIDDLEWARE_CLASSES.append('django_cas.middleware.CASMiddleware')
# CAS_VERSION = '1'
# CAS_SERVER_URL = 'https://cas.example.edu'
# CAS_IGNORE_REFERER = True
# CAS_REDIRECT_URL = URL_PREFIX + '/'
# The URL paths for login and logout pages
LOGIN_URL = URL_PREFIX + '/login/'
LOGOUT_URL = URL_PREFIX + '/logout/'
# SMTP mail server configuration settings
EMAIL_HOST = 'smtp.example.edu'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'service_user'
EMAIL_HOST_PASSWORD = 'service_password'
# List of IP addresses for hosts allowed to push data to the API
API_ALLOWED_HOSTS = []
# Authorization key for pushing data to the API
API_KEY = ''
# Email address that intervention requests are sent to
INTERVENTIONS_EMAIL = 'interventions@example.edu'
# "From" email address for the application
SERVER_EMAIL = 'osp@example.edu'
DEFAULT_FROM_EMAIL = SERVER_EMAIL
# All potential term choices that could be received by the API
TERM_CHOICES = [
('fa', 'Fall'),
('sp', 'Spring'),
('su', 'Summer'),
]
# Current year and term
CURRENT_TERM = 'su'
CURRENT_YEAR = 2011
# All potential enrollment status choices that could be received by the API
ENROLLMENT_STATUS_CHOICES = [
('A', 'Active'),
('D', 'Dropped'),
('W', 'Withdrawn'),
('X', 'Deleted'),
('C', 'Cancelled'),
('NP', 'Non-payment'),
]
# Enrollment statuses which are considered "active"
ACTIVE_ENROLLMENT_STATUSES = ['A',]
# List of campuses for your school
CAMPUS_CHOICES = [
'Main',
'Uptown',
]
# List of contact types for visits
VISIT_CONTACT_TYPE_CHOICES = [
'In Person',
'Email',
'Telephone',
'Online',
'Group Session',
]
# List of reasons for visits
VISIT_REASON_CHOICES = [
'New Student Admission',
'Academic Advising',
'Counseling',
'Personal Counseling',
'Early Alert Referral',
'Graduation Assessment Review',
'Career Counseling',
'Workshops, Class Presentations',
'Early Alert Counseling',
'Disability Counseling',
'Faculty Advising',
'Academic Warning',
'Academic Probation',
'First Academic Suspension',
'Final Academic Suspension',
]
# List of departments for visits
VISIT_DEPARTMENT_CHOICES = [
'Advising',
'Counseling',
]
# List of Career Services outcomes for visits
VISIT_CAREER_SERVICES_OUTCOME_CHOICES = [
'No Contact',
'Email',
'Phone',
'Scheduled Appointment with Career Services',
'No Show for Appointment',
'Took Career Assessment(s)',
'Met with Career Counselor',
'Career Decision in Process',
'Career and Program Decision Completed',
'Referred for Program Update',
'Program Updated',
]
# List of intervention reasons
INTERVENTION_REASONS = [
'Excessive Tardiness/Absenteeism',
'Failing Test/Quiz Scores',
'Missing Assignments',
'Needs Personal or Social Counseling',
'Needs Career Exploration',
'Needs Tutoring',
]
# Re-structure the choices lists for Django's sake
CAMPUS_CHOICES = [(x, x) for x in CAMPUS_CHOICES]
VISIT_CONTACT_TYPE_CHOICES = [(x, x) for x in VISIT_CONTACT_TYPE_CHOICES]
VISIT_REASON_CHOICES = [(x, x) for x in VISIT_REASON_CHOICES]
VISIT_DEPARTMENT_CHOICES = [(x, x) for x in VISIT_DEPARTMENT_CHOICES]
VISIT_CAREER_SERVICES_OUTCOME_CHOICES = [(x, x) for x in
VISIT_CAREER_SERVICES_OUTCOME_CHOICES]
INTERVENTION_REASONS = [(x, x) for x in INTERVENTION_REASONS]
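# Added example: after the restructuring above, a list such as ['Main', 'Uptown']
# becomes [('Main', 'Main'), ('Uptown', 'Uptown')], i.e. the (value, label) pairs
# Django expects for field choices.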
| marklocklear/Online-Student-Profile | deploy/osp_settings.py | Python | lgpl-3.0 | 5,374 |
"""This is the Cloud Signature Bot based on Time4Mind and Telegram
It allows signing documents using a Telegram chat and a Time4Mind account
"""
import sys
import os
import yaml
import logging
import time
import datetime
import uuid
import urllib.request
import shutil
import re
import magic
import json
from threading import Thread
from queue import Queue
from time4mind import Time4Mind
from telegram.ext import Updater, CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram import Bot
from flask import Flask, jsonify, abort, make_response, request
from pkboxsoap import PkBoxSOAP
# methods for a "poor man" data persistence based on a yaml file
def acl_load():
try:
with open(cfg['acl'], 'r') as yml_file: acl = yaml.load(yml_file)
except:
logging.warning("failed to read acl file: " + str(cfg['acl']))
acl = dict()
return acl
def acl_update(user_info):
acl = acl_load()
if user_info['id'] not in acl:
acl[user_info['id']] = dict()
for k in user_info:
acl[user_info['id']][k] = user_info[k]
acl_dump(acl)
def acl_dump(acl):
try:
with open(cfg['acl'], 'w+') as yml_file: yml_file.write(yaml.dump(acl))
#logging.info(yaml.dump(acl))
except:
logging.critical("error writing acl file: " + str(cfg['acl']))
def acl_set_status(user_id,status):
acl = acl_load()
if user_id not in acl:
        logging.error('user_id ' + str(user_id) + ' not found in acl file: ' \
+ str(cfg['acl']))
return None
acl[user_id]['status'] = status
acl_dump(acl)
def acl_get_user_info(user_id):
acl = acl_load()
if user_id not in acl:
return None
return acl[user_id]
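# Illustrative sketch (added): the acl file written above is a YAML mapping keyed
# by the Telegram user id; the values below are hypothetical.
#
#   123456789:
#     id: 123456789
#     chat_id: 123456789
#     time4mind_account: user@example.org
#     first_name: John
#     status: authorized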
# queue consumer
def process_queue(args):
(queue, bot, acl_set_status) = args
while True:
q_msg = queue.get()
logging.info('queue.get() : ' + repr(q_msg))
# auth transaction
if q_msg['type'] == "authorization":
transaction = q_msg['content']
acl_set_status(q_msg['chat_id'],"authorized")
message = 'You have been authorized. Now send me a file to sign!'
try:
bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
except:
logging.warning('error sending auth confirmation for transaction '\
+ '\ncontent: ' + str(transaction) \
+ '\nbot: ' + str(bot) \
+ '\nchat_id: ' + str(q_msg['chat_id']) \
+ '\nuser_id: ' + str(q_msg['user_id']) )
else:
logging.info('authorized user: ' + str(q_msg['user_id']))
# sign transaction
elif q_msg['type'] == "signature":
            # retrieve file info
operation_uuid4 = q_msg['operation_uuid4']
yml_pathname = cfg['storage'] + '/' + operation_uuid4 + '.yml'
try:
with open(yml_pathname, 'r') as yml_file:
docs = yaml.load(yml_file)
#logging.info(repr(docs))
except:
                logging.warning('error retrieving saved info for operation: '\
+ operation_uuid4 \
+ " from " + yml_pathname)
else:
logging.info("process_queue() operation " + operation_uuid4 \
+ " retrived info from " + yml_pathname)
# setup transaction signing otp
transaction = q_msg['content']
#bot.sendMessage(chat_id=q_msg['chat_id'], text=str(transaction))
try:
received_otp = json.loads(transaction['otp'])
except Exception as inst:
logging.debug(inst.args)
sign_otp = dict()
sign_otp['KeyPIN'] = received_otp['KeyPIN']
sign_otp['SessionKey'] = received_otp['SessionKey']
sign_otp['PIN'] = str(transaction['pin'])
logging.debug("process_queue() sign_otp: " + str(json.dumps(sign_otp)) )
# sign
parent_dir = cfg['storage'] + '/' + str(q_msg['chat_id'])
directory = parent_dir + '/' + operation_uuid4 + '/'
for file_item in docs['list']:
                # retrieve user certificate alias
user_info = acl_get_user_info(q_msg['user_id'])
signer = user_info['cred']['alias']
if 'domain' in cfg['pkbox'] and cfg['pkbox']['domain'] == "open":
signer = '[' + user_info['cred']['domain'] + ']_' + signer
                # retrieve file info
pathname = directory + file_item['file_id']
filetype = 'p7m'
if re.match(r'PDF document.*', magic.from_file(pathname)):
filetype = 'pdf'
# call pkbox for signing
logging.info("process_queue() operation " + operation_uuid4 \
+ " signing file: " + pathname)
#bot.sendMessage(chat_id=q_msg['chat_id'], text=str(json.dumps(sign_otp)))
result = sign_service.envelope(pathname, filetype, signer,
str(transaction['pin']),
str(json.dumps(sign_otp)))
# evaluate result
index = docs['list'].index(file_item)
if result == 'ok':
if filetype == "pdf":
docs['list'][index]['new_name'] = \
'SIGNED_' + docs['list'][index]['file_name']
else:
docs['list'][index]['new_name'] = \
docs['list'][index]['file_name'] + '.p7m'
logging.info('user ' + str(q_msg['user_id']) \
+ ' signed documents in operation: ' \
+ operation_uuid4 )
else:
docs['list'][index]['result'] = str(result)
logging.warning("envelope() returned " + str(result)
+ ' signing document for operation:'\
+ operation_uuid4)
# TODO:
# if pdfsign fail because protected with a password)
# it should return a msg to request sign it as p7m
# send message and signed files
for file_item in docs['list']:
pathname = directory + file_item['file_id']
if not 'new_name' in file_item:
message = 'Error signing file: ' + file_item['file_name'] \
+ " with result " + file_item['result']
bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
elif not os.path.exists(pathname):
logging.warning("not found " + pathname)
message = 'Error reading signed file: ' + file_item['new_name']
bot.sendMessage(chat_id=q_msg['chat_id'], text=message)
else:
bot.sendDocument( chat_id=q_msg['chat_id'],
document=open(pathname, 'rb'),
filename=file_item['new_name'])
os.remove(pathname)
# remove yaml file and operation dir
os.remove(yml_pathname)
os.rmdir(directory)
# try remove also chat_id dir if empty
try:
os.rmdir(parent_dir)
except:
pass
q.task_done()
# flask webserver to handle callback
app = Flask(__name__)
# function to start webserver as a thread
def flask_thread():
if 'listenaddr' in cfg['webserver']:
listenaddr = cfg['webserver']['listenaddr']
else:
listenaddr = '127.0.0.1'
app.run(host=listenaddr,debug=True, use_reloader=False)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/api/v1.0/authorize/<int:chat_id>/<int:user_id>', methods=['POST'])
def get_authorization(chat_id,user_id):
if any([ not request.json,
not user_id,
not chat_id ]):
logging.debug(request)
abort(400)
# process callback
try:
for transaction in request.json:
if transaction['approved'] == 1:
q_msg = dict()
q_msg['user_id'] = user_id
q_msg['chat_id'] = chat_id
q_msg['type'] = "authorization"
q_msg['content'] = transaction
q.put(q_msg)
except:
logging.error("failed processing transaction callback")
return jsonify({'authorization': 'received'}), 200
@app.route('/api/v1.0/sign/<int:chat_id>/<int:user_id>/<string:operation_uuid4>', methods=['POST'])
def get_signature(chat_id,user_id,operation_uuid4):
if any([ not request.json,
not operation_uuid4,
not chat_id,
not user_id ]):
logging.debug(request)
abort(400)
# process callback
try:
logging.debug(request)
for transaction in request.json:
if transaction['approved'] == 1:
q_msg = dict()
q_msg['chat_id'] = chat_id
q_msg['user_id'] = user_id
q_msg['operation_uuid4'] = operation_uuid4
q_msg['type'] = "signature"
q_msg['content'] = transaction
q.put(q_msg)
except:
logging.error("failed processing signature transaction callback")
return jsonify({'authorization': 'received'}), 200
"""
Example of an auth transaction APPROVED:
{
'pin': '',
'result': [],
'applicationId': None,
'transactionId': '954593fc-3162-4077-9731-af8aab27dda5',
'approved': 1,
'otp': 'E77337B8CC3FD9C5DB805B123259149BC9313A169D9319157187D91205214CFC',
'antiFraud': '[]'
}
Example of a sign transaction APPROVED:
{
'result': ['@pin'],
'pin': 'xI1lhMbAiALTCZ3I71bQIQ==',
'otp': '{
"SessionKey":"Z4NnyTUgUePgSNSAgPiiysY2yIB+lSZg1xXUArOK1zJq11JqqCJ3plTGysynjeu1uhHSM\\/4SvaBHqDjL6NIjmustOITo2dOf3DVzTyk3RIjCh9XWANNWFhgaMMmWI6B8NBA\\/tQ6+bztTt4PJ3OJwwdAI0u\\/EuDZLSCvdcUfohyg=",
"KeyPIN":"BNcuQZWbdcpZeMESzTPfKA==",
"Ciphered":true
}',
'applicationId': None,
'approved': 1,
'docSignatureResult': '[]',
'transactionId': 'd6d76bdc-23ab-473d-b9c8-a9632c147656',
'antiFraud': '[]'
}
Example of a transaction REFUSED:
{
'approved': 2,
'applicationId': None,
'transactionId': '8f52c58f-9f69-44e9-b716-d7dc1c69a6b4'
}
"""
############################
# define telegram functions
def start(bot, update):
bot.sendMessage(chat_id=update.message.chat_id,
text="To use this bot you should have:\n" \
+ "* an e-sign certficate\n" \
+ "* the Valid mobile app installed with the e-sign OTP enabled\n\n" \
+ "Link your Valid account with the command /link followed " \
+ "by the username (usually the email)\n\n" \
+ "An authorization request will be sent to your Valid mobile app.")
def status(bot, update):
user_info = acl_get_user_info(update.message.from_user.id)
if not user_info:
return
if user_info['status'] == "authorized":
text="You are already authorized to use Valid account *" \
+ str(user_info['time4mind_account']) +'*'
elif user_info['status'] == "waiting authorization":
text="I'm waiting your authorization from Valid app\n" + str(user_info)
else:
text="You are not yet authorized"
bot.sendMessage(chat_id=update.message.chat_id, text=text, parse_mode="Markdown")
def link(bot, update, args):
# check arguments
if len(args) != 1:
text = 'Please, pass me only one string without spaces'
bot.sendMessage(chat_id=update.message.chat_id, text=text)
return
# build telegram user data structure
user_info = dict()
user_info['id'] = update.message.from_user.id
user_info['time4mind_account'] = args[0]
user_info['first_name'] = update.message.from_user.first_name
user_info['last_name'] = update.message.from_user.last_name
user_info['username'] = update.message.from_user.username
user_info['chat_id'] = update.message.chat_id
user_info['status'] = 'waiting authorization'
if user_info['last_name']:
user_info['display_name'] = user_info['first_name'] + ' ' + user_info['last_name']
else:
user_info['display_name'] = user_info['first_name']
logging.info("/link command received from user: " + user_info['time4mind_account'])
# look for credentials
cred = time4mind.getMobileActiveCredentials(user_info['time4mind_account'])
if len(cred) < 1:
logging.warning("/link command did not found valid credentials for user: " + user_info['time4mind_account'])
message = 'Error sending an authorization request to this account'
bot.sendMessage(chat_id=update.message.chat_id, text=message)
else:
# TODO: choice if credentials > 1
user_info['cred'] = cred[0]
# send request
route = '/api/v1.0/authorize/' + str(user_info['chat_id']) \
+ '/' + str(user_info['id'])
try:
user_info['last_transaction'] = time4mind.authorize(user_info,route)
# save user data
acl_update(user_info)
# message user
message = 'I sent an authorization request to your Valid app'
bot.sendMessage(chat_id=update.message.chat_id, text=message)
except:
logging.warning("failed to request account usage authorization")
def sign_single_document(bot, update):
user_info = acl_get_user_info(update.message.from_user.id)
chat_id = update.message.chat_id
operation_uuid4 = str(uuid.uuid4())
logging.info("sign_single_document() operation " + operation_uuid4 \
+ " for user " + str(user_info))
if not user_info or 'status' not in user_info:
text="You are not yet authorized"
elif user_info['status'] == "waiting authorization":
text="Sorry but I'm still waiting your authorization from Valid app\n" \
+ str(user_info)
elif user_info['status'] == "authorized":
doc_info = update.message.document.__dict__
# {'mime_type': 'application/pdf', 'file_id': 'BQADBAADbwADNnbhUZSE6H4S95PIAg', 'file_name': '2017_ci_01_28.pdf', 'file_size': 71689}
file_id = doc_info['file_id']
file_info = bot.getFile(file_id)
# {'file_size': 71689, 'file_path': 'https://api.telegram.org/file/bot333930621:AAGJ4XLJ9UxQvfTEQXeKwOkiAvhTE5rdRJE/documents/file_0.pdf', 'file_id': 'BQADBAADbwADNnbhUZSE6H4S95PIAg'}
doc_info['file_path'] = file_info['file_path']
doc_info['href'] = cfg['webserver']['endpoint'] + '/api/v1.0/file/' \
+ operation_uuid4 + '/' + file_id
docs = { 'operation_uuid4': operation_uuid4,
'list': [ doc_info ] }
# save data to yaml
directory = cfg['storage']
if not os.path.exists(directory):
try:
os.makedirs(directory)
except:
logging.critical("error makedirs: " + str(directory))
yml_pathname = directory + '/' + operation_uuid4 + '.yml'
try:
with open(yml_pathname, 'w+') as yml_file: yml_file.write(yaml.dump(docs))
except:
logging.critical("error writing yml file: " + str(yml_pathname))
else:
logging.info("sign_single_document() operation " + operation_uuid4 \
+ " saved docs to " + yml_pathname)
# request to sign
signMobileRequest(user_info,docs)
text="Request to sign sent to your Valid app"
# download file
directory = cfg['storage'] + '/' + str(chat_id) + '/' + operation_uuid4 + '/'
if not os.path.exists(directory):
os.makedirs(directory)
with urllib.request.urlopen(doc_info['file_path']) as response, \
open(directory + doc_info['file_id'], 'wb') as out_file:
shutil.copyfileobj(response, out_file)
bot.sendMessage(chat_id=update.message.chat_id, text=text, parse_mode="Markdown")
def signMobileRequest(user_info,docs):
title = 'Signature Request'
sender = '@CloudSignature_Bot'
message = 'Documents to sign:'
for file_item in docs['list']:
message += '<li><a href=\"' + file_item['href'] + '\">' \
+ file_item['file_name'] + '</a></li>'
route = '/api/v1.0/sign/' \
+ str(user_info['chat_id']) + '/' \
+ str(user_info['id']) + '/' \
+ str(docs['operation_uuid4'])
try:
user_info['last_transaction'] = time4mind.signMobile(
user_info['cred']['otpId'],
user_info['cred']['otpProvider'],
title,sender,message,
user_info['cred']['label'],route)
except:
logging.warning("failed to request signature authorization")
else:
logging.info("signMobileRequest() sent to user: " + str(user_info['id']) \
+ " - operation: " + str(docs['operation_uuid4']) \
+ " - transaction: " + str(user_info['last_transaction']) )
try:
acl_update(user_info)
except:
logging.warning("failed to save transaction data")
def unknown_cmd(bot, update):
bot.sendMessage(chat_id=update.message.chat_id, text="Sorry, I didn't understand that command.")
def filter_any(msg):
logging.debug('Received message_id: '+str(msg.message_id))
if msg.text:
logging.debug('text: '+msg.text)
elif msg.document:
logging.debug('document: '+msg.document.file_name)
return False
###############
# Main section
# read configuration and setup time4mind class
with open(sys.argv[1], 'r') as yml_file: cfg = yaml.load(yml_file)
time4mind = Time4Mind(cfg)
# setup logger
logging.basicConfig(level=logging.DEBUG,
filename=cfg['logfile'],
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M')
# setup telegram updater and dispatchers
updater = Updater(token=cfg['bot']['token'])
dispatcher = updater.dispatcher
# begin telegram commands
# trace messages
trace_handler = MessageHandler(filter_any, lambda : True )
dispatcher.add_handler(trace_handler)
# start command
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
# link command
link_handler = CommandHandler('link', link, pass_args=True)
dispatcher.add_handler(link_handler)
# status command
status_handler = CommandHandler('status', status)
dispatcher.add_handler(status_handler)
# sign document filter
sign_handler = MessageHandler(Filters.document, sign_single_document)
dispatcher.add_handler(sign_handler)
# unknown commands
unknown_handler = MessageHandler(Filters.command, unknown_cmd)
dispatcher.add_handler(unknown_handler)
# end telegram commands
# setup queue
q = Queue(maxsize=100)
bot = Bot(cfg['bot']['token'])
dispatcher.run_async(process_queue,(q,bot,acl_set_status))
# setup pkbox handler to sign
sign_service = PkBoxSOAP(cfg['pkbox'])
# run updater and webserver as a threads
webserver_thread = Thread(target=flask_thread, name='webserver')
webserver_thread.start()
updater_thread = Thread(target=updater.start_polling, name='updater')
updater_thread.start()
| cisba/cloudsignaturebot | cloudsignaturebot.py | Python | lgpl-3.0 | 20,001 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from PyQt4.uic import loadUiType
from pyqtgraph.Qt import QtCore, QtGui
#from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import selEpisodio
import matplotlib.dates as md
from sklearn import preprocessing
import colores
import lectorFichero as lf
DEBUG = 0
class PanelScatter():
def __init__(self, selep, layout, cbSueno, cbSedentario, cbLigera, cbModerada, cbIzq, cbDer, btnPrev, btnNext, label):
self.layoutMatplot1 = layout
self.cbSueno = cbSueno
self.cbSedentario = cbSedentario
self.cbLigera = cbLigera
self.cbModerada = cbModerada
self.cbx_izq = cbIzq
self.cbx_der = cbDer
self.btnPrev = btnPrev
self.btnNext = btnNext
self.label = label
self.selep = selep
self.configureComboBox()
self.updateView()
self.cbSueno.clicked.connect(self.filtrarSueno)
self.cbSedentario.clicked.connect(self.filtrarSedentario)
self.cbLigera.clicked.connect(self.filtrarLigera)
self.cbModerada.clicked.connect(self.filtrarModerada)
self.btnPrev.clicked.connect(self.retroceder)
self.btnNext.clicked.connect(self.avanzar)
self.cbx_izq.activated[str].connect(self.cbx_izqListener)
self.cbx_der.activated[str].connect(self.cbx_derListener)
self.filSueno = True
self.filSedentario = True
self.filLigero =True
self.filModerado = True
def configureComboBox(self):
print "Configurando combobox"
self.cbx_izq.clear()
self.cbx_der.clear()
for i in self.selep.epFiltro:
self.cbx_izq.addItem(i.nombre)
self.cbx_der.addItem(i.nombre)
if(len(self.selep.epFiltro) > 1):
self.cbx_der.setCurrentIndex(1)
else:
self.cbx_der.setCurrentIndex(0)
self.cbx_izq.setCurrentIndex(0)
def openFile(self):
self.selep = self.loadData()
self.configureComboBox()
self.limpiarLayout()
self.updateView()
def loadData(self):
if(DEBUG): fname = '../data.csv'
else: fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
print "Abriendo fichero ", fname
csv = lf.LectorFichero(fname).getDatos()
selep = selEpisodio.selEpisodio(csv)
return selep
    #ep 0 -> left plot
    #ep 1 -> right plot
def getTime(self, a, b, ep):
if(ep == 0):
cbxId = self.cbx_izq.currentIndex()
else:
cbxId = self.cbx_der.currentIndex()
print "get time", cbxId
for i in self.selep.epFiltro[cbxId].temp:
if(a == i):
ind = 0
for k in self.selep.epFiltro[cbxId].flujo:
if(b == k):
print "encontrado"
return self.selep.epFiltro[cbxId].tiempo[ind]
else:
ind += 1
def onpick(self, event, ep):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print xdata[ind[0]], ydata[ind[0]]
self.label.setText('Instante ' + str(self.getTime(xdata[ind[0]], ydata[ind[0]], ep)))
def creaFiguras(self, ep):
""" ep: tiempo, temp, flujo"""
        #Time series
fig0 = plt.figure(tight_layout=True)
        #Normalize
preprocessing.scale(ep.temp, copy=True)
preprocessing.scale(ep.flujo, copy=True)
        #Temperature curve
ax1 = fig0.add_subplot(111)
ax1.plot(ep.tiempo, ep.temp, '-', color=colores.temperatura)
ax1.set_ylim([self.selep.csv.cotas.temp_min,self.selep.csv.cotas.temp_max])
#ax1.set_xlabel('Tiempo (m)')
ax1.set_ylabel('Temperatura (ºC)', color=colores.temperatura)
for tl in ax1.get_yticklabels():
tl.set_color(colores.temperatura)
fig0.autofmt_xdate()
xfmt = md.DateFormatter('%H:%M')
ax1.xaxis.set_major_formatter(xfmt)
start, end = ax1.get_xlim()
#ax1.xaxis.set_ticks(np.arange(start, end, 30))
ax1.grid(True)
        #Heat flux curve
ax2 = ax1.twinx()
ax2.plot(ep.tiempo, ep.flujo, '-', color=colores.flujo)
ax2.set_ylim([self.selep.csv.cotas.flujo_min, self.selep.csv.cotas.flujo_max])
ax2.set_ylabel('Flujo térmico', color=colores.flujo)
for tl in ax2.get_yticklabels():
tl.set_color(colores.flujo)
#Scatterplot
        #Vertical bands with the sleep-stage classification
if(ep.tipo == selEpisodio.tipoSueno):
profundo = self.selep.getProfundo(ep.ini, ep.fin)
despierto = self.selep.getDespierto(ep.ini, ep.fin)
for i in profundo:
ax1.axvspan(i[0], i[1], facecolor=colores.suenoProfundo, alpha=0.3, edgecolor=colores.suenoProfundo)
for i in despierto:
ax1.axvspan(i[0], i[1], facecolor=colores.despierto, alpha=0.5, edgecolor=colores.despierto)
fig1 = plt.figure(tight_layout=True)
ax1f1 = fig1.add_subplot(111)
k = 0
for i in range(ep.ini, ep.fin):
t = self.selep.getColorSueno(i)
ax1f1.plot(ep.temp[k], ep.flujo[k], 'o', picker=5, color=t)
k+=1
ax1f1.set_xlabel('Temperatura (ºC)', color=colores.temperatura)
ax1f1.set_ylabel('Flujo térmico', color=colores.flujo)
else:
fig1 = plt.figure(tight_layout=True)
ax1f1 = fig1.add_subplot(111)
line, = ax1f1.plot(ep.temp, ep.flujo, 'o', picker=5, color = "b")
ax1f1.set_xlabel('Temperatura (ºC)', color=colores.temperatura)
ax1f1.set_ylabel('Flujo térmico', color=colores.flujo)
#ax1f1.set_xlim([self.selep.csv.cotas.temp_min, self.selep.csv.cotas.temp_max])
#ax1f1.set_ylim([self.selep.csv.cotas.flujo_min, self.selep.csv.cotas.flujo_max])
return fig0, fig1
def crearWidget(self, ep, derecho):
"""
        ep: episode to display
        derecho: 0/1 left or right episode
"""
fig10, fig11 = self.creaFiguras(ep)
canvas1 = FigureCanvas(fig10)
canvas2 = FigureCanvas(fig11)
vbox = QtGui.QGridLayout()
vbox.addWidget(QtGui.QLabel("<b>Episodio:</b> " + ep.nombre))
vbox.addWidget(QtGui.QLabel("<b>Inicio:</b> " + str(ep.tiempo[0])))
vbox.addWidget(QtGui.QLabel("<b>Final:</b> " + str(ep.tiempo[-1])))
vbox.addWidget(QtGui.QLabel("<b>Duración:</b> %s min" % (ep.tiempo[-1] - ep.tiempo[0])))
vbox.addWidget(QtGui.QLabel("<b>Coeficiente de correlación:</b> " + str(ep.correlacion)[:5]))
vbox.addWidget(QtGui.QLabel("<b>Calorías consumidas:</b> " + str(ep.numCalorias)[:6] + " (" + str(ep.numCalorias/self.selep.totalCal*100)[:4] + "%)"))
vbox.addWidget(canvas1)
vbox.addWidget(canvas2)
canvas2.mpl_connect('pick_event', lambda event: self.onpick(event, derecho))
return vbox
    #Inserts elements into the layout with the new episodes
def updateView(self):
if(len(self.selep.epFiltro) > 0):
self.vbox = self.crearWidget(self.selep.epFiltro[self.cbx_izq.currentIndex()], 0)
self.layoutMatplot1.addLayout(self.vbox)
if(len(self.selep.epFiltro) > 1):
self.vbox2 = self.crearWidget(self.selep.epFiltro[self.cbx_der.currentIndex()], 1)
self.layoutMatplot1.addLayout(self.vbox2)
    #Removes the contents of the current layout
def limpiarLayout(self):
        plt.close('all') #Close all drawn figures to free memory
for cnt in reversed(range(self.vbox.count())):
widget = self.vbox.takeAt(cnt).widget()
if widget is not None:
widget.deleteLater()
for cnt in reversed(range(self.vbox2.count())):
widget = self.vbox2.takeAt(cnt).widget()
if widget is not None:
widget.deleteLater()
def filtrarSueno(self):
print "Filtrar sueño", self.cbSueno.isChecked()
        self.filSueno = self.cbSueno.isChecked() #Change the filter
        self.selep.update(self.filSueno, self.filSueno, self.filSedentario, self.filLigero, self.filModerado) #Update the array of filtered episodes
self.configureComboBox()
self.limpiarLayout()
self.updateView()
def filtrarSedentario(self):
print "Filtrar sedentario"
self.filSedentario = self.cbSedentario.isChecked()
self.selep.update(self.filSueno, self.filSueno, self.filSedentario, self.filLigero, self.filModerado)
self.configureComboBox()
self.limpiarLayout()
self.updateView()
def filtrarLigera(self):
print "Filtrar ligera"
self.filLigero = self.cbLigera.isChecked()
self.selep.update(self.filSueno, self.filSueno, self.filSedentario, self.filLigero, self.filModerado)
self.configureComboBox()
self.limpiarLayout()
self.updateView()
def filtrarModerada(self):
print "Filtrar moderada"
self.filModerado = self.cbModerada.isChecked()
self.selep.update(self.filSueno, self.filSueno, self.filSedentario, self.filLigero, self.filModerado)
self.configureComboBox()
self.limpiarLayout()
self.updateView()
def retroceder(self):
idxI = self.cbx_izq.currentIndex()
idxD = self.cbx_der.currentIndex()
if (idxI > 0):
self.cbx_izq.setCurrentIndex(idxI-1)
if(idxD > 0):
self.cbx_der.setCurrentIndex(idxD-1)
print "episodios", self.cbx_izq.currentIndex(), "y", self.cbx_der.currentIndex()
self.limpiarLayout()
self.updateView()
def avanzar(self):
idxI = self.cbx_izq.currentIndex()
idxD = self.cbx_der.currentIndex()
if (idxI < len(self.selep.epFiltro) - 1):
self.cbx_izq.setCurrentIndex(idxI+1)
if(idxD < len(self.selep.epFiltro) - 1):
self.cbx_der.setCurrentIndex(idxD+1)
print "episodios", self.cbx_izq.currentIndex(), "y", self.cbx_der.currentIndex()
self.limpiarLayout()
self.updateView()
def cbx_izqListener(self):
print "episodios", self.cbx_izq.currentIndex(), "y", self.cbx_der.currentIndex()
self.limpiarLayout()
self.updateView()
def cbx_derListener(self):
print "episodios", self.cbx_izq.currentIndex(), "y", self.cbx_der.currentIndex()
self.limpiarLayout()
self.updateView()
| acrsilva/animated-zZz-machine | bundle_final_app/source/panelScatter.py | Python | lgpl-3.0 | 11,134 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
import sys
from rbnics.utils.config import Config
def test_config(tempdir):
# Create a default configuration
config = Config()
# Write config to stdout
print("===============")
config.write(sys.stdout)
print("===============")
# Change options
config.set("backends", "online backend", "online")
config.set("problems", "cache", {"disk"})
# Write config to stdout
print("===============")
config.write(sys.stdout)
print("===============")
# Write config to file
config.write(os.path.join(tempdir, ".rbnicsrc"))
# Check that file has been written
assert os.path.isfile(os.path.join(tempdir, ".rbnicsrc"))
# Read back in
config2 = Config()
config2.read(tempdir)
# Write config2 to stdout
print("===============")
config2.write(sys.stdout)
print("===============")
# Check that read was successful
assert config == config2
| mathLab/RBniCS | tests/unit/utils/config/test_config.py | Python | lgpl-3.0 | 1,071 |
# -*- coding: utf-8 -*-
print '''<!DOCTYPE html><html>'''
incluir(data,"head")
print '''<body>'''
incluir(data,"header")
print '''</body></html>'''
| ZerpaTechnology/AsenZor | apps/votSys/user/vistas/templates/inicio.py | Python | lgpl-3.0 | 147 |
import argparse
from getpass import getpass
import json
import sys
import textwrap
import zmq
import colorama
from colorama import Fore
def execute(code):
ctx = zmq.Context.instance()
ctx.setsockopt(zmq.LINGER, 50)
repl_in = ctx.socket(zmq.PUSH)
repl_in.connect('tcp://127.0.0.1:2000')
repl_out = ctx.socket(zmq.PULL)
repl_out.connect('tcp://127.0.0.1:2001')
with repl_in, repl_out:
msg = (b'xcode1', code.encode('utf8'))
repl_in.send_multipart(msg)
while True:
data = repl_out.recv_multipart()
msg_type = data[0].decode('ascii')
msg_data = data[1].decode('utf8')
if msg_type == 'finished':
print('--- finished ---')
break
elif msg_type == 'stdout':
print(msg_data, end='')
sys.stdout.flush()
elif msg_type == 'stderr':
print(Fore.RED + msg_data + Fore.RESET, end='', file=sys.stderr)
sys.stderr.flush()
elif msg_type == 'waiting-input':
opts = json.loads(msg_data)
if opts['is_password']:
t = getpass(prompt='')
else:
t = input()
repl_in.send_multipart([b'input', t.encode('utf8')])
else:
print('--- other msg ---')
print(msg_type)
print(msg_data)
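# Protocol recap (added comment): execute() pushes the multipart message
# (b'xcode1', <utf-8 source>) on the PUSH socket, then drains the PULL socket where
# each reply is (<msg_type>, <payload>) with msg_type one of 'stdout', 'stderr',
# 'waiting-input' (payload is a JSON options dict) or 'finished'.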
sources = {
'interleaving': '''
import sys
print('asdf', end='', file=sys.stderr)
print('qwer', end='', file=sys.stdout)
print('zxcv', file=sys.stderr)
''',
'long_running': '''
import time
for i in range(10):
time.sleep(1)
print(i)
''',
'user_input': '''
import hashlib
import getpass
print('Please type your name.')
name = input('>> ')
print('Hello, {0}'.format(name))
print('Please type your password.')
pw = getpass.getpass()
m = hashlib.sha256()
m.update(pw.encode('utf8'))
print('Your password hash is {0}'.format(m.hexdigest()))
''',
'early_exception': '''a = wrong-+****syntax''',
'runtime_error': '''
def x():
raise RuntimeError('asdf')
def s():
x()
if __name__ == '__main__':
s()
''',
'tensorflow': '''
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print(tf.test.is_gpu_available())
print('ok')'''
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('program_name')
    args = parser.parse_args()
src = sources[args.program_name]
print('Test code:')
print(textwrap.indent(src, ' '))
print()
print('Execution log:')
execute(src)
if __name__ == '__main__':
colorama.init()
main()
| lablup/sorna-repl | python-tensorflow/test_run.py | Python | lgpl-3.0 | 2,665 |
#----------------------------------------------------------------------
# ssd1306.py from https://github.com/guyc/py-gaugette
# ported by Guy Carpenter, Clearwater Software
#
# This library works with
# Adafruit's 128x32 SPI monochrome OLED http://www.adafruit.com/products/661
# Adafruit's 128x64 SPI monochrome OLED http://www.adafruit.com/products/326
# it should work with other SSD1306-based displays.
# The datasheet for the SSD1306 is available
# http://www.adafruit.com/datasheets/SSD1306.pdf
#
# The code is based heavily on Adafruit's Arduino library
# https://github.com/adafruit/Adafruit_SSD1306
# written by Limor Fried/Ladyada for Adafruit Industries.
#
# Some important things to know about this device and SPI:
#
# - The SPI interface has no MISO connection. It is write-only.
#
# - The spidev xfer and xfer2 calls overwrite the output buffer
# with the bytes read back in during the SPI transfer.
# Use writebytes instead of xfer to avoid having your buffer overwritten.
#
# - The D/C (Data/Command) line is used to distinguish data writes
# and command writes - HIGH for data, LOW for commands. To be clear,
# the attribute bytes following a command opcode are NOT considered data,
# data in this case refers only to the display memory buffer.
# keep D/C LOW for the command byte including any following argument bytes.
# Pull D/C HIGH only when writing to the display memory buffer.
#
# SPI and GPIO calls are made through an abstraction library that calls
# the appropriate library for the platform.
# For the RaspberryPi:
# wiring2
# spidev
# For the BeagleBone Black:
# Adafruit_BBIO.SPI
# Adafruit_BBIO.GPIO
#
# - The pin connections between the BeagleBone Black SPI0 and OLED module are:
#
# BBB SSD1306
# P9_17 -> CS
# P9_15 -> RST (arbitrary GPIO, change at will)
# P9_13 -> D/C (arbitrary GPIO, change at will)
# P9_22 -> CLK
# P9_18 -> DATA
# P9_3 -> VIN
# N/C -> 3.3Vo
# P9_1 -> GND
#----------------------------------------------------------------------
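# Illustrative usage sketch (added; not part of the original file). It assumes the
# gaugette gpio/spi wrappers expose GPIO() and SPI(bus, device) constructors on your
# platform, which may differ:
#
#   gpio = gaugette.gpio.GPIO()
#   spi = gaugette.spi.SPI(0, 0)
#   oled = SSD1306(gpio, spi, dc_pin="P9_15", reset_pin="P9_13", rows=32, cols=128)
#   oled.begin()
#   oled.draw_text2(0, 0, "Hello", size=2)
#   oled.display()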
import gaugette.platform
import gaugette.gpio
import gaugette.spi
import gaugette.font5x8
import time
import sys
class SSD1306:
# Class constants are externally accessible as gaugette.ssd1306.SSD1306.CONST
# or my_instance.CONST
EXTERNAL_VCC = 0x1
SWITCH_CAP_VCC = 0x2
SET_LOW_COLUMN = 0x00
SET_HIGH_COLUMN = 0x10
SET_MEMORY_MODE = 0x20
SET_COL_ADDRESS = 0x21
SET_PAGE_ADDRESS = 0x22
RIGHT_HORIZ_SCROLL = 0x26
LEFT_HORIZ_SCROLL = 0x27
VERT_AND_RIGHT_HORIZ_SCROLL = 0x29
VERT_AND_LEFT_HORIZ_SCROLL = 0x2A
DEACTIVATE_SCROLL = 0x2E
ACTIVATE_SCROLL = 0x2F
SET_START_LINE = 0x40
SET_CONTRAST = 0x81
CHARGE_PUMP = 0x8D
SEG_REMAP = 0xA0
SET_VERT_SCROLL_AREA = 0xA3
DISPLAY_ALL_ON_RESUME = 0xA4
DISPLAY_ALL_ON = 0xA5
NORMAL_DISPLAY = 0xA6
INVERT_DISPLAY = 0xA7
DISPLAY_OFF = 0xAE
DISPLAY_ON = 0xAF
COM_SCAN_INC = 0xC0
COM_SCAN_DEC = 0xC8
SET_DISPLAY_OFFSET = 0xD3
SET_COM_PINS = 0xDA
SET_VCOM_DETECT = 0xDB
SET_DISPLAY_CLOCK_DIV = 0xD5
SET_PRECHARGE = 0xD9
SET_MULTIPLEX = 0xA8
MEMORY_MODE_HORIZ = 0x00
MEMORY_MODE_VERT = 0x01
MEMORY_MODE_PAGE = 0x02
# Device name will be /dev/spidev-{bus}.{device}
    # dc_pin is the data/command pin. This line is HIGH for data, LOW for command.
# We will keep d/c low and bump it high only for commands with data
# reset is normally HIGH, and pulled LOW to reset the display
def __init__(self, gpio, spi, dc_pin="P9_15", reset_pin="P9_13", buffer_rows=64, buffer_cols=128, rows=32, cols=128):
self.gpio = gpio
self.spi = spi
self.cols = cols
self.rows = rows
self.buffer_rows = buffer_rows
self.mem_bytes = self.buffer_rows * self.cols >> 3 # total bytes in SSD1306 display ram
self.dc_pin = dc_pin
self.reset_pin = reset_pin
self.gpio.setup(self.reset_pin, self.gpio.OUT)
self.gpio.output(self.reset_pin, self.gpio.HIGH)
self.gpio.setup(self.dc_pin, self.gpio.OUT)
self.gpio.output(self.dc_pin, self.gpio.LOW)
self.font = gaugette.font5x8.Font5x8
self.col_offset = 0
self.bitmap = self.Bitmap(buffer_cols, buffer_rows)
self.flipped = False
def reset(self):
self.gpio.output(self.reset_pin, self.gpio.LOW)
time.sleep(0.010) # 10ms
self.gpio.output(self.reset_pin, self.gpio.HIGH)
def command(self, *bytes):
# already low
# self.gpio.output(self.dc_pin, self.gpio.LOW)
self.spi.writebytes(list(bytes))
def data(self, bytes):
self.gpio.output(self.dc_pin, self.gpio.HIGH)
# chunk data to work around 255 byte limitation in adafruit implementation of writebytes
# revisit - change to 1024 when Adafruit_BBIO is fixed.
max_xfer = 255 if gaugette.platform.isBeagleBoneBlack else 1024
start = 0
remaining = len(bytes)
while remaining > 0:
count = remaining if remaining <= max_xfer else max_xfer
remaining -= count
self.spi.writebytes(bytes[start:start+count])
start += count
self.gpio.output(self.dc_pin, self.gpio.LOW)
def begin(self, vcc_state=SWITCH_CAP_VCC):
time.sleep(0.001) # 1ms
self.reset()
self.command(self.DISPLAY_OFF)
self.command(self.SET_DISPLAY_CLOCK_DIV, 0x80)
# support for 128x32 and 128x64 line models
if self.rows == 64:
self.command(self.SET_MULTIPLEX, 0x3F)
self.command(self.SET_COM_PINS, 0x12)
else:
self.command(self.SET_MULTIPLEX, 0x1F)
self.command(self.SET_COM_PINS, 0x02)
self.command(self.SET_DISPLAY_OFFSET, 0x00)
self.command(self.SET_START_LINE | 0x00)
if vcc_state == self.EXTERNAL_VCC:
self.command(self.CHARGE_PUMP, 0x10)
else:
self.command(self.CHARGE_PUMP, 0x14)
self.command(self.SET_MEMORY_MODE, 0x00)
self.command(self.SEG_REMAP | 0x01)
self.command(self.COM_SCAN_DEC)
self.command(self.SET_CONTRAST, 0x8f)
if vcc_state == self.EXTERNAL_VCC:
self.command(self.SET_PRECHARGE, 0x22)
else:
self.command(self.SET_PRECHARGE, 0xF1)
self.command(self.SET_VCOM_DETECT, 0x40)
self.command(self.DISPLAY_ALL_ON_RESUME)
self.command(self.NORMAL_DISPLAY)
self.command(self.DISPLAY_ON)
def clear_display(self):
self.bitmap.clear()
def invert_display(self):
self.command(self.INVERT_DISPLAY)
def flip_display(self, flipped=True):
self.flipped = flipped
if flipped:
self.command(self.COM_SCAN_INC)
self.command(self.SEG_REMAP | 0x00)
else:
self.command(self.COM_SCAN_DEC)
self.command(self.SET_COM_PINS, 0x02)
def normal_display(self):
self.command(self.NORMAL_DISPLAY)
def set_contrast(self, contrast=0x7f):
self.command(self.SET_CONTRAST, contrast)
def display(self):
self.display_block(self.bitmap, 0, 0, self.cols, self.col_offset)
def display_cols(self, start_col, count):
self.display_block(self.bitmap, 0, start_col, count, self.col_offset)
# Transfers data from the passed bitmap (instance of ssd1306.Bitmap)
# starting at row <row> col <col>.
# Both row and bitmap.rows will be divided by 8 to get page addresses,
# so both must divide evenly by 8 to avoid surprises.
#
# bitmap: instance of Bitmap
# The number of rows in the bitmap must be a multiple of 8.
# row: Starting row to write to - must be multiple of 8
# col: Starting col to write to.
# col_count: Number of cols to write.
# col_offset: column offset in buffer to write from
#
def display_block(self, bitmap, row, col, col_count, col_offset=0):
page_count = bitmap.rows >> 3
page_start = row >> 3
page_end = page_start + page_count - 1
col_start = col
col_end = col + col_count - 1
self.command(self.SET_MEMORY_MODE, self.MEMORY_MODE_VERT)
self.command(self.SET_PAGE_ADDRESS, page_start, page_end)
self.command(self.SET_COL_ADDRESS, col_start, col_end)
start = col_offset * page_count
length = col_count * page_count
self.data(bitmap.data[start:start+length])
# Diagnostic print of the memory buffer to stdout
def dump_buffer(self):
self.bitmap.dump()
def draw_pixel(self, x, y, on=True):
self.bitmap.draw_pixel(x, y, on)
def draw_text(self, x, y, string):
font_bytes = self.font.bytes
font_rows = self.font.rows
font_cols = self.font.cols
for c in string:
p = ord(c) * font_cols
for col in range(0, font_cols):
mask = font_bytes[p]
p += 1
for row in range(0, 8):
self.draw_pixel(x, y + row, mask & 0x1)
mask >>= 1
x += 1
def draw_text2(self, x, y, string, size=2, space=1):
font_bytes = self.font.bytes
font_rows = self.font.rows
font_cols = self.font.cols
for c in string:
p = ord(c) * font_cols
for col in range(0, font_cols):
mask = font_bytes[p]
p += 1
py = y
for row in range(0, 8):
for sy in range(0, size):
px = x
for sx in range(0, size):
self.draw_pixel(px, py, mask & 0x1)
px += 1
py += 1
mask >>= 1
x += size
x += space
def clear_block(self, x0, y0, dx, dy):
self.bitmap.clear_block(x0, y0, dx, dy)
def draw_text3(self, x, y, string, font):
return self.bitmap.draw_text(x, y, string, font)
def text_width(self, string, font):
return self.bitmap.text_width(string, font)
class Bitmap:
# Pixels are stored in column-major order!
# This makes it easy to reference a vertical slice of the display buffer
# and we use this to achieve reasonably fast vertical scrolling
# without hardware support.
def __init__(self, cols, rows):
self.rows = rows
self.cols = cols
self.bytes_per_col = rows >> 3
self.data = [0] * (self.cols * self.bytes_per_col)
def clear(self):
for i in range(0, len(self.data)):
self.data[i] = 0
# Diagnostic print of the memory buffer to stdout
def dump(self):
for y in range(0, self.rows):
mem_row = y >> 3
bit_mask = 1 << (y % 8)
line = ""
for x in range(0, self.cols):
mem_col = x
offset = mem_row + (self.rows >> 3) * mem_col
if self.data[offset] & bit_mask:
line += '*'
else:
line += ' '
print('|'+line+'|')
def draw_pixel(self, x, y, on=True):
if x < 0 or x >= self.cols or y < 0 or y >= self.rows:
return
mem_col = x
mem_row = y >> 3
bit_mask = 1 << (y % 8)
offset = mem_row + (self.rows >> 3) * mem_col
if on:
self.data[offset] |= bit_mask
else:
self.data[offset] &= (0xFF - bit_mask)
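# Worked example with hypothetical numbers: for a 128x64 buffer,
# bytes_per_col = 64 >> 3 = 8, so pixel (x=10, y=20) maps to mem_row = 20 >> 3 = 2,
# bit_mask = 1 << (20 % 8) = 0x10 and offset = 2 + 8*10 = 82, i.e. bit 4 of the
# 83rd byte of the column-major buffer.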
def clear_block(self, x0, y0, dx, dy):
for x in range(x0, x0 + dx):
for y in range(y0, y0 + dy):
self.draw_pixel(x, y, 0)
# returns the width in pixels of the string allowing for kerning & interchar-spaces
def text_width(self, string, font):
x = 0
prev_char = None
for c in string:
if c < font.start_char or c > font.end_char:
if prev_char != None:
x += font.space_width + prev_width + font.gap_width
prev_char = None
else:
pos = ord(c) - ord(font.start_char)
(width, offset) = font.descriptors[pos]
if prev_char != None:
x += font.kerning[prev_char][pos] + font.gap_width
prev_char = pos
prev_width = width
if prev_char != None:
x += prev_width
return x
def draw_text(self, x, y, string, font):
height = font.char_height
prev_char = None
for c in string:
if c < font.start_char or c > font.end_char:
if prev_char != None:
x += font.space_width + prev_width + font.gap_width
prev_char = None
else:
pos = ord(c) - ord(font.start_char)
(width, offset) = font.descriptors[pos]
if prev_char != None:
x += font.kerning[prev_char][pos] + font.gap_width
prev_char = pos
prev_width = width
bytes_per_row = (width + 7) >> 3
for row in range(0, height):
py = y + row
mask = 0x80
p = offset
for col in range(0, width):
px = x + col
if font.bitmaps[p] & mask:
self.draw_pixel(px, py, 1) # for kerning, never draw black
mask >>= 1
if mask == 0:
mask = 0x80
p += 1
offset += bytes_per_row
if prev_char != None:
x += prev_width
return x
# This is a helper class to display a scrollable list of text lines.
# The list must have at least 1 item.
class ScrollingList:
def __init__(self, ssd1306, list, font):
self.ssd1306 = ssd1306
self.list = list
self.font = font
self.position = 0 # row index into list, 0 to len(list) * self.rows - 1
self.offset = 0 # led hardware scroll offset
self.pan_row = -1
self.pan_offset = 0
self.pan_direction = 1
self.bitmaps = []
self.rows = ssd1306.rows
self.cols = ssd1306.cols
self.bufrows = self.rows * 2
downset = (self.rows - font.char_height) >> 1
for text in list:
width = ssd1306.cols
text_bitmap = ssd1306.Bitmap(width, self.rows)
width = text_bitmap.draw_text(0, downset, text, font)
if width > 128:
text_bitmap = ssd1306.Bitmap(width + 15, self.rows)
text_bitmap.draw_text(0, downset, text, font)
self.bitmaps.append(text_bitmap)
# display the first word in the first position
self.ssd1306.display_block(self.bitmaps[0], 0, 0, self.cols)
# how many steps to the nearest home position
def align_offset(self):
pos = self.position % self.rows
midway = self.rows >> 1
delta = (pos + midway) % self.rows - midway
return -delta
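# Worked example with hypothetical numbers (rows=32): position 44 gives
# pos=12, midway=16, delta=(12+16)%32-16=12, so align_offset() returns -12
# (scroll back 12 rows to the home position at 32); position 52 gives
# delta=-12, so it returns +12 (scroll forward to the home position at 64).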
def align(self, delay=0.005):
delta = self.align_offset()
if delta != 0:
steps = abs(delta)
sign = delta // steps
for i in range(0, steps):
if i > 0 and delay > 0:
time.sleep(delay)
self.scroll(sign)
return self.position // self.rows
# scroll up or down. Does multiple one-pixel scrolls if delta is >1 or <-1
def scroll(self, delta):
if delta == 0:
return
count = len(self.list)
step = (delta > 0) - (delta < 0) # step = 1 or -1
for i in range(0, delta, step):
if (self.position % self.rows) == 0:
n = self.position // self.rows
# at even boundary, need to update hidden row
m = (n + step + count) % count
row = (self.offset + self.rows) % self.bufrows
self.ssd1306.display_block(self.bitmaps[m], row, 0, self.cols)
if m == self.pan_row:
self.pan_offset = 0
self.offset = (self.offset + self.bufrows + step) % self.bufrows
self.ssd1306.command(self.ssd1306.SET_START_LINE | self.offset)
max_position = count * self.rows
self.position = (self.position + max_position + step) % max_position
# pans the current row back and forth repeatedly.
# Note that this currently only works if we are at a home position.
def auto_pan(self):
n = self.position // self.rows
if n != self.pan_row:
self.pan_row = n
self.pan_offset = 0
text_bitmap = self.bitmaps[n]
if text_bitmap.cols > self.cols:
row = self.offset # this only works if we are at a home position
if self.pan_direction > 0:
if self.pan_offset <= (text_bitmap.cols - self.cols):
self.pan_offset += 1
else:
self.pan_direction = -1
else:
if self.pan_offset > 0:
self.pan_offset -= 1
else:
self.pan_direction = 1
self.ssd1306.display_block(text_bitmap, row, 0, self.cols, self.pan_offset)
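# Hedged usage sketch (a minimal illustration, not part of the original driver).
# It assumes the enclosing driver class in this module is named SSD1306 and that
# the gaugette package is importable; the stub GPIO/SPI objects below only mimic
# the attributes the driver touches, so the sketch can run without real hardware.
if __name__ == '__main__':
    class _StubGPIO(object):
        OUT, HIGH, LOW = 0, 1, 0
        def setup(self, pin, mode): pass
        def output(self, pin, value): pass
    class _StubSPI(object):
        def writebytes(self, data): pass
    oled = SSD1306(_StubGPIO(), _StubSPI(), rows=32, cols=128)  # class name assumed
    oled.begin()
    oled.clear_display()
    oled.draw_text2(0, 0, 'HELLO', size=2)
    oled.display()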
|
guyc/py-gaugette
|
gaugette/ssd1306.py
|
Python
|
lgpl-3.0
| 18,416
|
from django import template
# from kilonull.conf import BlogConf
from kilonull.models import Menu, MenuItem
from kilonull.settings import SETTINGS
import re
register = template.Library()
@register.simple_tag
def blog_title():
return SETTINGS['BLOG_TITLE']
# Generate HTML for the header menu.
# Use a regex (path) to check if the current page is in the menu. If it is,
# apply the active class.
@register.simple_tag
def get_menu(menu_slug, curr_page):
html = ""
menu_items = MenuItem.objects.filter(menu__slug=menu_slug) \
.order_by("order")
path = re.compile("%s(.*)" % SETTINGS['BLOG_SITE_URL'])
for item in menu_items:
html += "<li"
match = path.match(item.link_url)
if match and match.group(1) == curr_page:
html += " class='active'"
html += "><a href='%s'>%s</a></li>" % (item.link_url, item.link_text)
return html
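# Hedged usage sketch: in a template of a configured Django project with this
# tag library loaded, the tags above would typically be used as
#
#   {% load theming %}
#   <title>{% blog_title %}</title>
#   <ul class="nav">{% get_menu 'header' request.path %}</ul>
#
# where 'header' is a hypothetical Menu slug and request.path is one way to
# supply the current page for the active-class check.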
|
vacuus/kilonull
|
kilonull/templatetags/theming.py
|
Python
|
lgpl-3.0
| 901
|
# -*- coding: utf-8 -*-
import logging
import odoo
from odoo.tools.func import lazy_property
from .sessionstore import PostgresSessionStore
_logger = logging.getLogger(__name__)
class RootTkobr(odoo.http.Root):
@lazy_property
def session_store(self):
# Setup http sessions
_logger.debug('HTTP sessions stored in Postgres')
return PostgresSessionStore(session_class=odoo.http.OpenERPSession)
root = RootTkobr()
odoo.http.root.session_store = root.session_store
|
meta-it/misc-addons
|
base_session_store_psql/http.py
|
Python
|
lgpl-3.0
| 500
|
"""Counter.py
Part of the AQuA Cesium Controller software package
author=Martin Lichtman
created=2013-10-19
modified>=2015-05-11
This file holds everything to model a National Instruments DAQmx counter.
It communicates with LabView via the higher-level LabView(Instrument) class.
Saving of returned data is handled in the LabView class.
"""
from __future__ import division
__author__ = 'Martin Lichtman'
import logging
import numpy as np
from atom.api import Str, Float, Typed, Member, Bool, Int, List
from cs_instruments import Instrument
from instrument_property import Prop, ListProp
from analysis import AnalysisWithFigure
from sklearn import mixture
from scipy.optimize import curve_fit
from scipy.special import erf
import matplotlib.gridspec as gridspec
import time
logger = logging.getLogger(__name__)
gs = gridspec.GridSpec(2, 2)
gmix = mixture.GaussianMixture(n_components=2)
class Counters(Instrument):
version = '2015.05.11'
counters = Typed(ListProp)
def __init__(self, name, experiment, description=''):
super(Counters, self).__init__(name, experiment, description)
# start with a blank list of counters
self.counters = ListProp('counters', experiment, listElementType=Counter, listElementName='counter')
self.properties += ['version', 'counters']
class Counter(Prop):
"""Each individual counter has a field for the signal source, clock source, and clock rate (in Hz,
used only for internal clocking).
"""
counter_source = Str()
clock_source = Str()
clock_rate = Float()
def __init__(self, name, experiment, description=''):
super(Counter, self).__init__(name, experiment, description)
self.properties += ['counter_source', 'clock_source', 'clock_rate']
class CounterAnalysis(AnalysisWithFigure):
counter_array = Member()
binned_array = Member()
meas_analysis_path = Str()
meas_data_path = Str()
iter_analysis_path = Str()
update_lock = Bool(False)
iterationonly = Bool(False)
enable = Bool()
drops = Int(3)
bins = Int(25)
shots = Int(2)
ROIs = List([0])
graph_roi = Int(0)
def __init__(self, name, experiment, description=''):
super(CounterAnalysis, self).__init__(name, experiment, description)
self.meas_analysis_path = 'analysis/counter_data'
self.meas_data_path = 'data/counter/data'
self.iter_analysis_path = 'shotData'
self.properties += ['enable', 'drops', 'bins', 'shots', 'graph_roi','draw_fig','iterationonly']
def preIteration(self, iterationResults, experimentResults):
self.counter_array = []
self.binned_array = None
def format_data(self, array):
"""Formats raw 2D counter data into the required 4D format.
Formats raw 2D counter data with implicit structure:
[ # counter 0
[ dropped_bins shot_time_series dropped_bins shot_time_series ... ],
# counter 1
[ dropped_bins shot_time_series dropped_bins shot_time_series ... ]
]
into the 4D format expected by the subsequent analyses:
[ # measurements, can have different lengths run-to-run
[ # shots array, fixed size
[ # roi list, shot 0
[ time_series_roi_0 ],
[ time_series_roi_1 ],
...
],
[ # roi list, shot 1
[ time_series_roi_0 ],
[ time_series_roi_1 ],
...
],
...
],
...
]
"""
rois, bins = array.shape[:2]
bins_per_shot = self.drops + self.bins # self.bins is data bins per shot
# calculate the number of shots dynamically
num_shots = int(bins/(bins_per_shot))
# calculate the number of measurements contained in the raw data
# there may be extra shots if we get branching implemented
num_meas = num_shots//self.shots
# build a mask for selecting the valid (non-dropped) bins
shot_mask = ([False]*self.drops + [True]*self.bins)
good_shots = self.shots*num_meas
# mask for the roi
ctr_mask = np.array(shot_mask*good_shots + 0*shot_mask*(num_shots-good_shots), dtype='bool')
# apply the mask and partially reshape
array = array[:, ctr_mask].reshape((rois, num_meas, self.shots, self.bins))
array = array.swapaxes(0, 1) # swap rois and measurement axes
array = array.swapaxes(1, 2) # swap rois and shots axes
return array
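# Worked example with hypothetical numbers: with drops=3, bins=25, shots=2 and a
# raw array of shape (1 roi, 112 bins), bins_per_shot=28, num_shots=4, num_meas=2;
# the mask keeps 25 of every 28 bins, giving shape (1, 100), which is reshaped to
# (1, 2, 2, 25) and swapped into (2 measurements, 2 shots, 1 roi, 25 bins).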
def analyzeMeasurement(self, measurementResults, iterationResults, experimentResults):
if self.enable:
'''# number of shots is hard coded right now
bins_per_shot = self.drops + self.bins
num_shots = int(len(self.counter_array[-1])/bins_per_shot)
#if self.draw_fig:
# print "Number of shots: {}".format(num_shots)
# print "Bins per shot: {}".format(bins_per_shot)
# print "Length of counter array: {}".format(int(len(self.counter_array[-1])))
# counter array is appended every measurement so the counter hists can be calculated
# updated every cycle
# WARNING: counter_array only works with a single counter right now
self.binned_array = np.array([
self.counter_array[:, s*bins_per_shot + self.drops:(s+1)*bins_per_shot].sum(1)
for s in range(num_shots)
])'''
# MFE 2018/01: this analysis has been generalized such that multiple sub measurements can occur
# in the same traditional measurement
array = measurementResults[self.meas_data_path][()]
try:
# package data into an array with shape (sub measurements, shots, counters, time series data)
array = self.format_data(array)
# flatten the sub_measurements by converting top level to normal list and concatenating
self.counter_array += list(array)
except ValueError:
errmsg = "Error retrieving counter data. Offending counter data shape: {}"
logger.exception(errmsg.format(array.shape))
except:
logger.exception('Unhandled counter data exception')
# write this cycle's data into hdf5 file so that the threshold analysis can read it
# when multiple counter support is enabled, the ROIs parameter will hold the count
# Note the constant 1 is for the roi column parameter, all counters get entered in a single row
n_meas, n_shots, n_rois, bins = array.shape
sum_array = array.sum(axis=3).reshape((n_meas, n_shots, n_rois, 1))
measurementResults[self.meas_analysis_path] = sum_array
# put the sum data in the expected format for display
if self.binned_array is None:
self.binned_array = np.array([sum_array.reshape((n_meas, n_shots, n_rois))])
else:
self.binned_array = np.concatenate((
self.binned_array,
[sum_array.reshape((n_meas, n_shots, n_rois))]
))
if not self.iterationonly:
self.updateFigure()
def analyzeIteration(self, iterationResults, experimentResults):
if self.enable:
# recalculate binned_array to get rid of cut data
# iterationResults[self.iter_analysis_path] = self.binned_array
meas = map(int, iterationResults['measurements'].keys())
meas.sort()
path = 'measurements/{}/' + self.meas_analysis_path
try:
res = np.array([iterationResults[path.format(m)] for m in meas])
except KeyError:
# I was having a problem with the file maybe not being ready yet
logger.warning("Issue reading hdf5 file. Waiting then repeating.")
time.sleep(0.1) # try again in a little
res = []
for m in meas:
try:
res.append(iterationResults[path.format(m)])
except KeyError:
msg = (
"Reading from hdf5 file during measurement `{}`"
" failed."
).format(m)
logger.exception(msg)
res = np.array(res)
total_meas = len(self.binned_array)
# drop superfluous ROI_columns dimension
self.binned_array = res.reshape(res.shape[:4])
logger.info('cut data: {}'.format(total_meas -
len(self.binned_array)))
iterationResults[self.iter_analysis_path] = self.binned_array
if self.iterationonly:
self.updateFigure()
return
def updateFigure(self):
if self.draw_fig:
if self.enable:
if not self.update_lock:
try:
self.update_lock = True
# There are two figures in an AnalysisWithFigure. Draw to the offscreen figure.
fig = self.backFigure
# Clear figure.
fig.clf()
# make one plot
# Single shot
ax = fig.add_subplot(221)
# PREVIOUS HYBRID VERSION. COMMENTING OUT IN CASE IT IS NEEDED.
# Drop first 3 bins
'''bins_per_shot = self.drops + self.bins
num_shots = int(len(self.counter_array[-1])/bins_per_shot)
dropped_array = self.counter_array[:, self.drops:self.drops+self.bins]
for i in range(1,num_shots):
dropped_array=np.append(dropped_array,self.counter_array[:, self.drops*(i+1)+self.bins*i:self.drops*i+self.bins*(i+1)],axis=1)
ax.bar(np.arange(len(dropped_array[-1])), dropped_array[-1])
ax.set_title('Shot: {}'.format(len(self.counter_array)))#Singlt shot
ax = fig.add_subplot(222)
#ax.bar(np.arange(len(self.counter_array[-1, self.drops:])), self.counter_array[:, self.drops:].mean(0))
ax.bar(np.arange(len(dropped_array[-1])), dropped_array.mean(0))
ax.set_title('Iteration average') #Average over all shots/iteration
ax = fig.add_subplot(223)
ax.plot(self.binned_array.transpose(),'.')
#ax.legend(['shot 1', 'shot 2'], fontsize='small', loc=0)'''
#merge conflict
# Average over all shots/iteration
ax2 = fig.add_subplot(222)
ptr = 0
ca = np.array(self.counter_array)
for s in range(self.shots):
xs = np.arange(ptr, ptr + self.bins)
ax.bar(xs, ca[-1, s, self.graph_roi])
ax2.bar(xs, ca[:, s, self.graph_roi].mean(0))
ptr += max(1.05*self.bins, self.bins+1)
ax.set_title('Measurement: {}'.format(len(ca)))
ax2.set_title('Iteration average')
# time series of sum data
ax = fig.add_subplot(223)
# histogram of sum data
ax2 = fig.add_subplot(224)
n_shots = self.binned_array.shape[2]
legends = []
for roi in range(self.binned_array.shape[3]):
for s in range(n_shots):
ax.plot(self.binned_array[:, :, s, roi].flatten(), '.')
# bins = max + 2 takes care of the case where all entries are 0, which causes
# an error in the plot
ax2.hist(
self.binned_array[:, :, s, roi].flatten(),
bins=80,
range=(1.0,self.binned_array[:, :, s, roi].flatten().max()),
histtype='step'
)
legends.append("c{}_s{}".format(roi, s))
#end merge conflict
ax.set_title('Binned Data')
ax2.legend(legends, fontsize='small', loc=0)
super(CounterAnalysis, self).updateFigure()
except:
logger.exception('Problem in CounterAnalysis.updateFigure()')
finally:
self.update_lock = False
class CounterHistogramAnalysis(AnalysisWithFigure):
'''
Takes in shot data, generates histograms, fits histograms,
and then plots various attributes as a function of iteration along with histograms with fit overplotted.
'''
# =====================Fit Functions================= #
def intersection(self, A0,A1,m0,m1,s0,s1):
return (m1*s0**2-m0*s1**2-np.sqrt(s0**2*s1**2*(m0**2-2*m0*m1+m1**2+2*np.log(A0/A1)*(s1**2-s0**2))))/(s0**2-s1**2)
def area(self,A0,A1,m0,m1,s0,s1):
return np.sqrt(np.pi/2)*(A0*s0+A0*s0*erf(m0/np.sqrt(2)/s0)+A1*s1+A1*s1*erf(m1/np.sqrt(2)/s1))
# Normed Overlap for arbitrary cut point
def overlap(self,xc,A0,A1,m0,m1,s0,s1):
err0=A0*np.sqrt(np.pi/2)*s0*(1-erf((xc-m0)/np.sqrt(2)/s0))
err1=A1*np.sqrt(np.pi/2)*s1*(erf((xc-m1)/np.sqrt(2)/s1)+erf(m1/np.sqrt(2)/s1))
return (err0+err1)/self.area(A0,A1,m0,m1,s0,s1)
# Relative Fraction in 1
def frac(self, A0,A1,m0,m1,s0,s1):
return 1/(1+A0*s0*(1+erf(m0/np.sqrt(2)/s0))/A1/s1/(1+erf(m1/np.sqrt(2)/s1)))
def dblgauss(self, x,A0,A1,m0,m1,s0,s1):
return A0*np.exp(-(x-m0)**2 / (2*s0**2)) + A1*np.exp(-(x-m1)**2 / (2*s1**2))
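# Worked example with hypothetical numbers: for two fitted peaks with A0=A1=1,
# m0=5, m1=25, s0=2, s1=5 the log term vanishes and intersection() returns
# (100 - 125 - 200)/(4 - 25) ~= 10.7, the count value where the two Gaussians
# cross; analyzeIteration() below uses this as the cutoff between the two shot
# populations, with overlap() and frac() evaluated at the fitted parameters.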
# ==================================================== #
update_lock = Bool(False)
enable = Bool(False)
hbins = Int(30)
hist1 = None
hist2 = None
def __init__(self, name, experiment, description=''):
super(CounterHistogramAnalysis, self).__init__(name, experiment, description)
self.properties += ['enable']
def preExperiment(self, experimentResults):
# self.hist_rec = np.recarray(1,)
return
def analyzeMeasurement(self, measurementResults, iterationResults, experimentResults):
return
def analyzeIteration(self, iterationResults, experimentResults):
if self.enable:
histout = [] # amplitudes, edges
# Overlap, fraction, cutoff
fitout = np.recarray(2, [('overlap', float), ('fraction', float), ('cutoff', float)])
optout = np.recarray(2, [('A0', float), ('A1', float), ('m0', float), ('m1', float), ('s0', float), ('s1', float)])
shots = iterationResults['shotData'][()]
# make shot number the primary axis
shots = shots.reshape(-1, *shots.shape[2:]).swapaxes(0, 1)
shots = shots[:, :, 0] # pick out first roi only
hbins = self.hbins
if self.hbins < 0:
hbins = np.arange(np.max(shots)+1)
for i in range(shots.shape[0]):
gmix.fit(np.array([shots[i]]).transpose())
h = np.histogram(shots[i], bins=hbins, normed=True)
histout.append((h[1][:-1], h[0]))
est = [
gmix.weights_.max()/10,
gmix.weights_.min()/10,
gmix.means_.min(),
gmix.means_.max(),
np.sqrt(gmix.means_.min()),
np.sqrt(gmix.means_.max())
]
try:
popt, pcov = curve_fit(self.dblgauss, h[1][1:], h[0], est)
# popt=[A0,A1,m0,m1,s0,s1] : Absolute value
popt = np.abs(popt)
xc = self.intersection(*popt)
if np.isnan(xc):
logger.warning('Bad Cut on Shot: {}'.format(i))
fitout[i] = np.nan, np.nan, np.nan
optout[i] = popt*np.nan
else:
fitout[i] = self.overlap(xc, *popt), self.frac(*popt), xc
optout[i] = popt
except (RuntimeError, RuntimeWarning, TypeError):
logger.exception('Bad fit on Shot: {} '.format(i))
fitout[i] = np.nan, np.nan, np.nan
optout[i] = np.ones(6)*np.nan
iterationResults['analysis/dblGaussPopt'] = optout
iterationResults['analysis/dblGaussFit'] = fitout
logger.info("histout: {}".format(histout))
iterationResults['analysis/histogram'] = np.array(histout,
dtype='uint32')
self.updateFigure(iterationResults)
return
def updateFigure(self, iterationResults):
if self.draw_fig:
if self.enable:
if not self.update_lock:
try:
self.update_lock = True
# There are two figures in an AnalysisWithFigure. Draw to the offscreen figure.
fig = self.backFigure
# Clear figure.
fig.clf()
shots = iterationResults['shotData'][()]
# flatten sub-measurement dimension
# make shot number the primary axis (not measurement)
shots = shots.reshape(-1, *shots.shape[2:]).swapaxes(0, 1)
roi = 0
shots = shots[:, :, roi] # pick out first roi only
popts = iterationResults['analysis/dblGaussPopt']
# fits = iterationResults['analysis/dblGaussFit']
# make one plot
for i in range(len(shots)):
ax = fig.add_subplot('{}1{}'.format(len(shots), 1+i))
hbins = self.hbins
if self.hbins < 0:
# use explicit bins
hbins = np.arange(np.max(shots[i, :])+1)
h = ax.hist(shots[i], bins=hbins, histtype='step', normed=True)
ax.plot(h[1][1:]-.5, self.dblgauss(h[1][1:], *popts[i]))
if i == 1:
ax.set_yscale('log', nonposy='clip')
ax.set_ylim(10**int(-np.log10(len(shots[i]))-1), 1)
else:
ax.set_ylim(0, 1.05*np.max(h[0]))
super(CounterHistogramAnalysis, self).updateFigure()
except:
logger.exception('Problem in CounterHistogramAnalysis.updateFigure().')
finally:
self.update_lock = False
|
QuantumQuadrate/CsPyController
|
python/Counter.py
|
Python
|
lgpl-3.0
| 19,978
|
"""
Module defining DataID as an enumeration, e.g. concentration, velocity.
The Enum class allows accessing members by .name and .value.
FunctionID is deprecated and will be removed.
"""
from enum import IntEnum, auto
# Schema for metadata
DataSchema = {
"type": "object",
"properties": {
"Type": {"type": "string"}, # Automatically generated from MuPIF, e.g. mupif.field.Field
"Type_ID": {"type": "string"}, # Automatically generated from MuPIF, e.g. DataID.FID_Temperature
"Name": {"type": "string"}, # e.g. "Density of inclusion"
"ID": {"type": ["string", "integer"]}, # Unique ID
"Description": {"type": "string"}, # Further description
"Units": {"type": "string"}, # Automatically generated from MuPIF, e.g. "kg"
"ValueType": {"type": "string"}, # Automatically generated
"Origin": {"type": "string", "enum": ["Experiment", "User_input", "Simulated"]},
"Experimental_details": {"type": "string"},
"Experimental_record": {"type": "string"}, # If applies, link to corresponding experimental record
"Estimated_std": {"type": "number"}, # Percent of standard deviation
"Execution": {
"properties": {
"ID": {"type": ["string", "integer"]}, # Optional execution ID
"Use_case_ID": {"type": ["string", "integer"]}, # If Simulated, give reference to Use_case_ID
"Task_ID": {"type": "string"} # If Simulated, give reference to Task_ID
},
"required": []
}
},
"required": [
"Type", "Type_ID", "Units", "ValueType"
]
}
class DataID(IntEnum):
"""
This class represents the supported values of IDs of property, field, etc.
Values of members should be stored by .name, .value should not be used.
"""
# # # # # # # # # # # # # # # # # # # # #
# Field
FID_Displacement = auto()
FID_Strain = auto()
FID_Stress = auto()
FID_Temperature = auto()
FID_Humidity = auto()
FID_Concentration = auto()
FID_Thermal_absorption_volume = auto()
FID_Thermal_absorption_surface = auto()
FID_Material_number = auto()
FID_BucklingShape = auto()
FID_FibreOrientation = auto()
FID_DomainNumber = auto()
FID_Permeability = auto()
FID_Velocity = auto()
FID_Pressure = auto()
FID_ESI_VPS_Displacement = auto()
# # # # # # # # # # # # # # # # # # # # #
# GY field IDs
FID_Mises_Stress = auto()
FID_MaxPrincipal_Stress = auto()
FID_MidPrincipal_Stress = auto()
FID_MinPrincipal_Stress = auto()
FID_MaxPrincipal_Strain = auto()
FID_MidPrincipal_Strain = auto()
FID_MinPrincipal_Strain = auto()
# # # # # # # # # # # # # # # # # # # # #
# Particle
PSID_ParticlePositions = auto()
# # # # # # # # # # # # # # # # # # # # #
# Function
FuncID_ProbabilityDistribution = auto()
# # # # # # # # # # # # # # # # # # # # #
# Misc
ID_None = auto()
ID_GrainState = auto()
ID_InputFile = auto()
# # # # # # # # # # # # # # # # # # # # #
# Property
PID_Concentration = auto()
PID_CumulativeConcentration = auto()
PID_Velocity = auto()
PID_transient_simulation_time = auto()
PID_effective_conductivity = auto()
PID_volume_fraction_red_phosphor = auto()
PID_volume_fraction_green_phosphor = auto()
PID_conductivity_red_phosphor = auto()
PID_conductivity_green_phosphor = auto()
PID_mean_radius_red_phosphor = auto()
PID_mean_radius_green_phosphor = auto()
PID_standard_deviation_red_phosphor = auto()
PID_standard_deviation_green_phosphor = auto()
PID_RefractiveIndex = auto()
PID_NumberOfRays = auto()
PID_LEDSpectrum = auto()
PID_ChipSpectrum = auto()
PID_LEDColor_x = auto()
PID_LEDColor_y = auto()
PID_LEDCCT = auto()
PID_LEDRadiantPower = auto()
PID_ParticleNumberDensity = auto()
PID_ParticleRefractiveIndex = auto()
PID_EmissionSpectrum = auto()
PID_ExcitationSpectrum = auto()
PID_AsorptionSpectrum = auto()
PID_ScatteringCrossSections = auto()
PID_InverseCumulativeDist = auto()
PID_NumberOfFluorescentParticles = auto()
PID_ParticleMu = auto()
PID_ParticleSigma = auto()
PID_PhosphorEfficiency = auto()
PID_Length = auto()
PID_Height = auto()
PID_Thickness = auto()
PID_Deflection = auto()
PID_EModulus = auto() # Young's modulus
PID_PoissonRatio = auto()
# Mul2 properties
PID_YoungModulus1 = auto()
PID_YoungModulus2 = auto()
PID_YoungModulus3 = auto()
PID_PoissonRatio23 = auto()
PID_PoissonRatio13 = auto()
PID_PoissonRatio12 = auto()
PID_ShearModulus23 = auto()
PID_ShearModulus13 = auto()
PID_ShearModulus12 = auto()
PID_CriticalLoadLevel = auto()
# INSA properties
PID_ExtensionalInPlaneStiffness = auto()
PID_ExtensionalOutOfPlaneStiffness = auto()
PID_ShearInPlaneStiffness = auto()
PID_ShearOutOfPlaneStiffness = auto()
PID_LocalBendingStiffness = auto()
PID_CriticalForce = auto()
PID_CriticalMoment = auto()
# Digimat Properties
PID_MatrixYoung = auto()
PID_MatrixPoisson = auto()
PID_InclusionYoung = auto()
PID_InclusionPoisson = auto()
PID_InclusionVolumeFraction = auto()
PID_InclusionAspectRatio = auto()
PID_MatrixOgdenModulus = auto()
PID_MatrixOgdenExponent = auto()
PID_InclusionSizeNormalized = auto()
PID_CompositeAxialYoung = auto()
PID_CompositeInPlaneYoung = auto()
PID_CompositeInPlaneShear = auto()
PID_CompositeTransverseShear = auto()
PID_CompositeInPlanePoisson = auto()
PID_CompositeTransversePoisson = auto()
PID_CompositeStrain11Tensor = auto()
PID_CompositeStrain22Tensor = auto()
PID_CompositeStress11Tensor = auto()
PID_MatrixDensity = auto()
PID_CompositeDensity = auto()
PID_InclusionDensity = auto()
# CUBA keywords from Jun 6, 2017 - https://github.com/simphony/simphony-common/blob/master/ontology/cuba.yml
PID_Position = auto()
PID_Direction = auto()
PID_Status = auto()
PID_Label = auto()
PID_Chemical_specie = auto()
PID_Material_type = auto()
PID_Shape_center = auto()
PID_Shape_length = auto()
PID_Shape_radius = auto()
PID_Shape_side = auto()
PID_Crystal_storage = auto()
PID_Name_UC = auto()
PID_Lattice_vectors = auto()
PID_Symmetry_lattice_vectors = auto()
PID_Occupancy = auto()
PID_Bond_label = auto()
PID_Bond_type = auto()
# PID_Velocity = auto() Duplicate
PID_Dimension = auto()
PID_Acceleration = auto()
PID_Radius = auto()
PID_Size = auto()
PID_Mass = auto()
PID_Volume = auto()
PID_Angular_velocity = auto()
PID_Angular_acceleration = auto()
PID_Simulation_domain_dimensions = auto()
PID_Simulation_domain_origin = auto()
PID_Dynamic_viscosity = auto()
PID_Kinematic_viscosity = auto()
PID_Diffusion_coefficient = auto()
PID_Probability_coefficient = auto()
PID_Friction_coefficient = auto()
PID_Scaling_coefficient = auto()
PID_Equation_of_state_coefficient = auto()
PID_Contact_angle = auto()
PID_Amphiphilicity = auto()
PID_Phase_interaction_strength = auto()
PID_Hamaker_constant = auto()
PID_Zeta_potential = auto()
PID_Ion_valence_effect = auto()
PID_Debye_length = auto()
PID_Smoothing_length = auto()
PID_Lattice_spacing = auto()
PID_Time_step = auto()
PID_Number_of_time_steps = auto()
PID_Force = auto()
PID_Torque = auto()
PID_Density = auto()
PID_Pressure = auto()
PID_Temperature = auto()
PID_Distribution = auto()
PID_Order_parameter = auto()
PID_Original_position = auto()
PID_Current = auto()
PID_Final = auto()
PID_Delta_displacement = auto()
PID_External_applied_force = auto()
PID_Euler_angles = auto()
PID_Sphericity = auto()
PID_Young_modulus = auto()
PID_Poisson_ratio = auto()
PID_Restitution_coefficient = auto()
PID_Rolling_friction = auto()
PID_Volume_fraction = auto()
PID_Coupling_time = auto()
PID_Cutoff_distance = auto()
PID_Energy_well_depth = auto()
PID_Van_der_Waals_radius = auto()
PID_Dielectric_constant = auto()
PID_Dynamic_pressure = auto()
PID_Flux = auto()
PID_Homogenized_stress_tensor = auto()
PID_Strain_tensor = auto()
PID_Relative_velocity = auto()
PID_Diffusion_velocity = auto()
PID_Stress_tensor = auto()
PID_Volume_fraction_gradient = auto()
PID_Cohesion_energy_density = auto()
PID_Major = auto()
PID_Minor = auto()
PID_Patch = auto()
PID_Full = auto()
PID_Charge = auto()
PID_Charge_density = auto()
PID_Description = auto()
PID_Electric_field = auto()
PID_Electron_mass = auto()
PID_Electrostatic_field = auto()
PID_Energy = auto()
PID_Heat_conductivity = auto()
PID_Initial_viscosity = auto()
PID_Linear_constant = auto()
PID_Maximum_viscosity = auto()
PID_Minimum_viscosity = auto()
PID_Momentum = auto()
PID_Moment_inertia = auto()
PID_Potential_energy = auto()
PID_Power_law_index = auto()
PID_Relaxation_time = auto()
PID_Surface_tension = auto()
PID_Time = auto()
PID_Viscosity = auto()
PID_Collision_operator = auto()
PID_Reference_density = auto()
PID_External_forcing = auto()
PID_Flow_type = auto()
PID_Vector = auto()
PID_Index = auto()
PID_Thermodynamic_ensemble = auto()
PID_Variable = auto()
PID_None = auto()
PID_Lattice_parameter = auto()
PID_Steady_state = auto()
PID_Maximum_Courant_number = auto()
PID_Number_of_cores = auto()
PID_Magnitude = auto()
PID_Number_of_physics_states = auto()
PID_Cohesive_group = auto()
PID_FillingTime = auto()
# End of CUBA keywords
PID_Demo_Min = auto()
PID_Demo_Max = auto()
PID_Demo_Integral = auto()
PID_Demo_Volume = auto()
PID_Demo_Value = auto()
PID_UserTimeStep = auto()
PID_KPI01 = auto()
# ESI VPS properties
PID_ESI_VPS_TEND = auto()
PID_ESI_VPS_PLY1_E0t1 = auto()
PID_ESI_VPS_PLY1_E0t2 = auto()
PID_ESI_VPS_PLY1_E0t3 = auto()
PID_ESI_VPS_PLY1_G012 = auto()
PID_ESI_VPS_PLY1_G023 = auto()
PID_ESI_VPS_PLY1_G013 = auto()
PID_ESI_VPS_PLY1_NU12 = auto()
PID_ESI_VPS_PLY1_NU23 = auto()
PID_ESI_VPS_PLY1_NU13 = auto()
PID_ESI_VPS_PLY1_E0c1 = auto()
PID_ESI_VPS_PLY1_RHO = auto()
PID_ESI_VPS_hPLY = auto()
PID_ESI_VPS_PLY1_XT = auto()
PID_ESI_VPS_PLY1_XC = auto()
PID_ESI_VPS_PLY1_YT = auto()
PID_ESI_VPS_PLY1_YC = auto()
PID_ESI_VPS_PLY1_S12 = auto()
PID_ESI_VPS_FIRST_FAILURE_VAL = auto()
PID_ESI_VPS_FIRST_FAILURE_MOM = auto()
PID_ESI_VPS_FIRST_FAILURE_ROT = auto()
PID_ESI_VPS_CRIMP_STIFFNESS = auto()
PID_ESI_VPS_FIRST_FAILURE_ELE = auto()
PID_ESI_VPS_FIRST_FAILURE_PLY = auto()
PID_ESI_VPS_TOTAL_MODEL_MASS = auto()
PID_ESI_VPS_BUCKL_LOAD = auto()
PID_ESI_VPS_MOMENT_CURVE = auto()
PID_ESI_VPS_ROTATION_CURVE = auto()
PID_ESI_VPS_MOMENT = auto()
PID_ESI_VPS_ROTATION = auto()
PID_ESI_VPS_THNOD_1 = auto()
PID_ESI_VPS_THNOD_2 = auto()
PID_ESI_VPS_SECFO_1 = auto()
PID_ESI_VPS_SECFO_2 = auto()
PID_BoundaryConfiguration = auto()
# University of Trieste properties
PID_SMILE_MOLECULAR_STRUCTURE = auto()
PID_MOLECULAR_WEIGHT = auto()
PID_POLYDISPERSITY_INDEX = auto()
PID_CROSSLINKER_TYPE = auto()
PID_FILLER_DESIGNATION = auto()
PID_SMILE_MODIFIER_MOLECULAR_STRUCTURE = auto()
PID_SMILE_FILLER_MOLECULAR_STRUCTURE = auto()
PID_CROSSLINKONG_DENSITY = auto()
PID_FILLER_CONCENTRATION = auto()
PID_DENSITY_OF_FUNCTIONALIZATION = auto()
PID_TEMPERATURE = auto()
PID_PRESSURE = auto()
PID_DENSITY = auto()
PID_TRANSITION_TEMPERATURE = auto()
# GY user-case property IDs
PID_HyperelasticPotential = auto()
PID_ForceCurve = auto()
PID_DisplacementCurve = auto()
PID_CorneringAngle = auto()
PID_CorneringStiffness = auto()
# Demo properties
PID_dirichletBC = auto()
PID_conventionExternalTemperature = auto()
PID_conventionCoefficient = auto()
# GY property IDs
PID_Footprint = auto()
PID_Braking_Force = auto()
PID_Stiffness = auto()
PID_Hyper1 = auto()
PID_maxDisplacement = auto()
PID_maxMisesStress = auto()
PID_maxPrincipalStress = auto()
PID_Hyper2 = auto()
#
PID_GrainState = auto()
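# Hedged usage sketch (a minimal illustration, not part of the original module).
# It shows the .name-based round trip recommended in the DataID docstring and,
# as an assumption, validates a metadata record against DataSchema with the
# third-party jsonschema package, which may or may not be a dependency here.
if __name__ == '__main__':
    key = DataID.FID_Temperature.name              # store members by .name ...
    assert DataID[key] is DataID.FID_Temperature   # ... and recover them later
    sample_metadata = {
        'Type': 'mupif.field.Field',
        'Type_ID': 'DataID.FID_Temperature',
        'Units': 'K',
        'ValueType': 'Scalar',
    }
    try:
        import jsonschema
        jsonschema.validate(instance=sample_metadata, schema=DataSchema)
    except ImportError:
        pass  # jsonschema not installed; skip the optional check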
|
mupif/mupif
|
mupif/dataid.py
|
Python
|
lgpl-3.0
| 12,557
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'core.views.empresta_list_view', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
luzfcb/projetoteste
|
projeto/urls.py
|
Python
|
lgpl-3.0
| 308
|
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
This is a Class which allows one to manipulate a git repository.
"""
#external imports
import os
import tempfile
#internal imports
import subuserlib.subprocessExtras as subprocessExtras
from subuserlib.classes.fileStructure import FileStructure
class GitRepository():
def __init__(self,path):
self.__path = path
def getPath(self):
return self.__path
def run(self,args):
"""
Run git with the given command line arguments.
"""
return subprocessExtras.call(["git"]+args,cwd=self.getPath())
def runCollectOutput(self,args):
"""
Run git with the given command line arguments and return a tuple with (returncode,output).
"""
return subprocessExtras.callCollectOutput(["git"]+args,cwd=self.getPath())
def getFileStructureAtCommit(self,commit):
"""
Get a ``FileStructure`` object which relates to the given git commit.
"""
return GitFileStructure(self,commit)
def commit(self,message):
"""
Run git commit with the given commit message.
"""
try:
tempFile = tempfile.NamedTemporaryFile("w",encoding="utf-8")
except TypeError: # Older versions of python have broken tempfile implementation for which you cannot set the encoding.
tempFile = tempfile.NamedTemporaryFile("w")
message = message.encode('ascii', 'ignore').decode('ascii')
with tempFile as tempFile:
tempFile.write(message)
tempFile.flush()
return self.run(["commit","--file",tempFile.name])
def checkout(self,commit,files=[]):
"""
Run git checkout
"""
self.run(["checkout",commit]+files)
class GitFileStructure(FileStructure):
def __init__(self,gitRepository,commit):
"""
Initialize the file structure.
Here we setup test stuff:
>>> import subuserlib.subprocessExtras
>>> subuserlib.subprocessExtras.call(["git","init"],cwd="/home/travis/hashtest")
0
>>> subuserlib.subprocessExtras.call(["git","add","."],cwd="/home/travis/hashtest")
0
>>> subuserlib.subprocessExtras.call(["git","commit","-m","Initial commit"],cwd="/home/travis/hashtest")
0
"""
self.__gitRepository = gitRepository
self.__commit = commit
def getCommit(self):
return self.__commit
def getRepository(self):
return self.__gitRepository
def lsTree(self, subfolder, extraArgs=[]):
"""
Returns a list of tuples of the form:
(mode,type,hash,path)
Corresponding to the items found in the subfolder.
"""
if not subfolder.endswith("/"):
subfolder += "/"
if subfolder == "/":
subfolder = "./"
(returncode,output) = self.getRepository().runCollectOutput(["ls-tree"]+extraArgs+[self.getCommit(),subfolder])
if returncode != 0:
return [] # Returning [] here is intentional. It is simpler to just return [] here than to check if the repository is properly initialized everywhere else.
lines = output.splitlines()
items = []
for line in lines:
mode,objectType,rest = line.split(" ",2)
objectHash,path = rest.split("\t",1)
items.append((mode,objectType,objectHash,path))
return items
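# Example with a hypothetical line: a raw `git ls-tree` entry of the form
#   100644 blob <sha1-hash><TAB>blah
# splits into the tuple ('100644', 'blob', '<sha1-hash>', 'blah').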
def ls(self, subfolder, extraArgs=[]):
"""
Returns a list of file and folder paths.
Paths are relative to the repository as a whole.
>>> from subuserlib.classes.gitRepository import GitRepository
>>> gitRepository = GitRepository("/home/travis/hashtest")
>>> fileStructure = gitRepository.getFileStructureAtCommit("master")
>>> print(",".join(fileStructure.ls("./")))
bar,blah
"""
items = self.lsTree(subfolder,extraArgs)
paths = []
for item in items:
paths.append(item[3])
return paths
def lsFiles(self,subfolder):
"""
Returns a list of paths to files in the subfolder.
Paths are relative to the repository as a whole.
>>> from subuserlib.classes.gitRepository import GitRepository
>>> gitRepository = GitRepository("/home/travis/hashtest")
>>> fileStructure = gitRepository.getFileStructureAtCommit("master")
>>> print(",".join(fileStructure.lsFiles("./")))
blah
"""
return list(set(self.ls(subfolder)) - set(self.lsFolders(subfolder)))
def lsFolders(self,subfolder):
"""
Returns a list of paths to folders in the subfolder.
Paths are relative to the repository as a whole.
>>> from subuserlib.classes.gitRepository import GitRepository
>>> gitRepository = GitRepository("/home/travis/hashtest")
>>> fileStructure = gitRepository.getFileStructureAtCommit("master")
>>> print(",".join(fileStructure.lsFolders("./")))
bar
"""
return self.ls(subfolder,extraArgs=["-d"])
def exists(self,path):
"""
>>> from subuserlib.classes.gitRepository import GitRepository
>>> gitRepository = GitRepository("/home/travis/hashtest")
>>> fileStructure = gitRepository.getFileStructureAtCommit("master")
>>> fileStructure.exists("./blah")
True
>>> fileStructure.exists("./non-existant")
False
"""
try:
self.read(path)
return True
except OSError:
return False
def read(self,path):
"""
Returns the contents of the given file at the given commit.
>>> from subuserlib.classes.gitRepository import GitRepository
>>> gitRepository = GitRepository("/home/travis/hashtest")
>>> fileStructure = gitRepository.getFileStructureAtCommit("master")
>>> print(fileStructure.read("./blah"))
blahblah
<BLANKLINE>
"""
(errorcode,content) = self.getRepository().runCollectOutput(["show",self.getCommit()+":"+path])
if errorcode != 0:
raise OSError("Git show exited with error "+str(errorcode)+". File does not exist.")
return content
def getMode(self,path):
"""
>>> from subuserlib.classes.gitRepository import GitRepository
>>> gitRepository = GitRepository("/home/travis/hashtest")
>>> fileStructure = gitRepository.getFileStructureAtCommit("master")
>>> print(fileStructure.getModeString("./blah"))
100644
"""
allObjects = self.lsTree("./",extraArgs=["-r"])
for treeObject in allObjects:
if os.path.normpath(treeObject[3]) == os.path.normpath(path):
return int(treeObject[0],8)
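# Hedged usage sketch (a minimal illustration, not part of the original module).
# It builds a throwaway repository in a temporary directory and exercises the
# commit/read round trip; it assumes the git executable is on PATH and that a
# git user identity is configured.
if __name__ == '__main__':
    import shutil
    repoDir = tempfile.mkdtemp()
    try:
        repo = GitRepository(repoDir)
        repo.run(["init"])
        with open(os.path.join(repoDir, "example.txt"), "w") as exampleFile:
            exampleFile.write("hello\n")
        repo.run(["add", "."])
        repo.commit("Initial commit")
        fileStructure = repo.getFileStructureAtCommit("HEAD")
        print(fileStructure.ls("./"))               # ['example.txt']
        print(fileStructure.read("./example.txt"))  # hello
    finally:
        shutil.rmtree(repoDir)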
|
weaver-viii/subuser
|
logic/subuserlib/classes/gitRepository.py
|
Python
|
lgpl-3.0
| 6,303
|
"""
Cairo test suite.
"""
import imp
import os
import cairosvg
reference_cairosvg = imp.load_source(
'cairosvg_reference', pathname=os.path.join(
os.path.dirname(__file__), 'cairosvg_reference', 'cairosvg',
'__init__.py'))
cairosvg.features.LOCALE = reference_cairosvg.features.LOCALE = 'en_US'
TEST_FOLDER = os.path.join(os.path.dirname(__file__), 'svg')
os.chdir(TEST_FOLDER) # relative image urls
if os.environ.get('CAIROSVG_TEST_FILES'): # pragma: no cover
ALL_FILES = os.environ['CAIROSVG_TEST_FILES'].split(',')
else:
ALL_FILES = os.listdir(TEST_FOLDER)
ALL_FILES.sort(key=lambda name: name.lower())
FILES = [
os.path.join(
os.path.dirname(TEST_FOLDER) if name.startswith('fail')
else TEST_FOLDER, name)
for name in ALL_FILES]
|
Kozea/CairoSVG
|
test_non_regression/__init__.py
|
Python
|
lgpl-3.0
| 796
|
'''
This file contains all the functions that constitute the "frontend of the backend",
i.e. native Python plotting functions. All actual plots are generated by plotting.py --
this is purely about displaying them. manualfit() and geogui() rely on PyQt4, which
is likely to fail, so it's sequestered.
Version: 2019aug06
'''
## Imports and globals...need Qt since matplotlib doesn't support edit boxes, grr!
from optima import OptimaException, dcp, printv, sigfig, makeplots, getplotselections, gridcolors, odict, isnumber, promotetolist, loadobj, sanitizeresults, reanimateplots
from pylab import figure, close, floor, ion, ioff, isinteractive, ceil, array, show, pause
from pylab import subplot, ylabel, transpose, legend, fill_between, xlim, title
from matplotlib.widgets import CheckButtons, Button
global panel, results, origpars, tmppars, parset, fulllabellist, fullkeylist, fullsubkeylist, fulltypelist, fullvallist, plotfig, panelfig, check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plusbutton, minusbutton, plotargs, scrwid, scrhei, globaladvanced # For manualfit GUI
if 1: panel, results, origpars, tmppars, parset, fulllabellist, fullkeylist, fullsubkeylist, fulltypelist, fullvallist, plotfig, panelfig, check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plusbutton, minusbutton, plotargs, scrwid, scrhei, globaladvanced = [None]*25
scrwid, scrhei = 24, 12 # Specify these here...if too large, should shrink anyway
def importpyqt():
''' Try to import pyqt, either PyQt4 or PyQt5, but allow it to fail '''
try:
from PyQt4 import QtGui as pyqt
except:
try: from PyQt5 import QtWidgets as pyqt
except: pyqt = Exception('QtGui could not be imported')
return pyqt
pyqt = importpyqt()
##############################################################################
### USER-VISIBLE FUNCTIONS
##############################################################################
def plotresults(results, toplot=None, fig=None, figargs=None, **kwargs):
'''
Does the hard work for updateplots() for pygui()
Keyword arguments if supplied are passed on to figure().
Usage:
results = P.runsim('default')
plotresults(results)
Version: 2016jan25
'''
if figargs is None: figargs = dict()
if fig is None: fig = makenewfigure(**figargs)
# Do plotting
wasinteractive = isinteractive() # You might think you can get rid of this...you can't!
if wasinteractive: ioff()
width,height = fig.get_size_inches()
# Actually create plots
if 'figsize' in kwargs: kwargs.pop('figsize', None)
plots = makeplots(results, toplot=toplot, die=True, figsize=(width, height), fig=fig, **kwargs)
naxes = len(plots[0].axes) # If done interactively, they're all in the first plot
# Calculate the rows and columns
aspectratio = 1.5 # The target aspect ratio for plots, for choosing how many rows and columns to use
nrows = 1
ncols = 1
while nrows*ncols < naxes:
if width/ncols/aspectratio > height/nrows: ncols += 1 # Height is more squashed: add a column
else: nrows += 1 # Width is more squashed: add a row
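# Worked example with hypothetical numbers: for a 16x12 inch figure, 5 axes and
# aspectratio=1.5 the loop runs nrows 1->2 (10.7 < 12), ncols 1->2 (10.7 > 6),
# nrows 2->3 (5.3 < 6), then stops at 3 rows x 2 columns (6 >= 5 axes).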
# Adjust margins
fig.subplots_adjust(left=0.07, bottom=0.05, right=0.85, top=0.95, wspace=0.9, hspace=0.7) # NB, 1.0 seems meaningless for wspace and hspace...
for a,ax in enumerate(plots[-1].axes):
ax.change_geometry(nrows, ncols, a+1)
# Handle interactivity like a boss
if wasinteractive: ion()
show()
return None
def pygui(tmpresults, toplot=None, advanced=False, verbose=2, figargs=None, **kwargs):
'''
PYGUI
Make a Python GUI for plotting results. Opens up a control window and a plotting window,
and when "Update" is clicked, will clear the contents of the plotting window and replot.
Usage:
pygui(results, [toplot])
where results is the output of e.g. runsim() and toplot is an optional list of form e.g.
toplot = ['prev-tot', 'inci-pop']
(see epiformatslist in plotting.py)
Warning: the plots won't resize automatically if the figure is resized, but if you click
"Update", then they will.
Version: 1.3 (2017feb07)
'''
global check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plusbutton, minusbutton, panelfig, results, plotargs, globaladvanced
plotargs = kwargs # Reset global to match function input
results = sanitizeresults(tmpresults)
globaladvanced = advanced
## Define options for selection
plotselections = getplotselections(results, advanced=globaladvanced)
checkboxes = plotselections['keys']
checkboxnames = plotselections['names']
isselected = []
toplot = promotetolist(toplot) # Ensure it's a list
if not toplot or toplot[0] is None or toplot[0]=='default':
if len(toplot): toplot.pop(0) # Remove the first element
defaultboxes = [checkboxes[i] for i,tf in enumerate(plotselections['defaults']) if tf] # Back-convert defaults from true/false list to list of keys
toplot.extend(defaultboxes)
if len(toplot):
tmptoplot = dcp(toplot) # Make a copy to compare arguments
for key in checkboxes:
if key in toplot:
isselected.append(True)
tmptoplot.remove(key)
else:
isselected.append(False)
if len(tmptoplot)!=0:
errormsg = 'Not all keys were recognized; mismatched ones were:\n'
errormsg += '%s\n' % tmptoplot
errormsg += 'Available keys are:\n'
errormsg += '%s' % checkboxes
if not globaladvanced: errormsg += '\nSet advanced=True for more options'
printv(errormsg, 1, verbose=verbose)
## Set up control panel
if advanced:
figwidth = 14
advwid = 0.6 # Adjust button width
else:
figwidth = 7
advwid = 1.0
figheight = 12
panelfig = figure(num='Optima control panel', figsize=(figwidth,figheight), facecolor=(0.95, 0.95, 0.95)) # Open control panel
xinit = 0.10*advwid
if advanced: cbapos = [xinit*advwid, 0.07, 0.9, 1.8] # cba="check box axes position": extra tall, for moving later
else: cbapos = [xinit, 0.07, 0.8, 0.9]
ypos = 0.02 # y-position of buttons
bwid = 0.14*advwid # x-width of buttons
bhei = 0.03 # y-height of buttons
sep = 0.165 # Separation between buttons
pmwid = 0.03*advwid # Width of plus/minus buttons
checkboxaxes = panelfig.add_axes(cbapos) # Create checkbox locations
updateaxes = panelfig.add_axes([xinit+0*sep, ypos, bwid, bhei]) # Create update button location
clearaxes = panelfig.add_axes([xinit+1*sep, ypos, bwid, bhei]) # Create clear button location
defaultsaxes = panelfig.add_axes([xinit+2*sep, ypos, bwid, bhei]) # Create defaults button location
advancedaxes = panelfig.add_axes([xinit+3*sep, ypos, bwid, bhei]) # Create defaults button location
closeaxes = panelfig.add_axes([xinit+4*sep, ypos, bwid, bhei]) # Create close button location
plusaxes = panelfig.add_axes([xinit+5*sep, ypos+0.015, pmwid, 0.02]) # Create plus button location
minusaxes = panelfig.add_axes([xinit+5*sep, ypos-0.005, pmwid, 0.02]) # Create plus button location
check = CheckButtons(checkboxaxes, checkboxnames, isselected) # Actually create checkboxes
# Reformat the checkboxes
stastr = ' - stacked'
perstr = ' - population'
nboxes = len(check.rectangles)
for b in range(nboxes):
label = check.labels[b]
labeltext = label.get_text()
labelpos = label.get_position()
label.set_position((labelpos[0]*0.3,labelpos[1])) # Not sure why by default the check boxes are so far away
if labeltext.endswith(perstr): label.set_text('Per population') # Clear label
elif labeltext.endswith(stastr): label.set_text('Stacked') # Clear label
else: label.set_weight('bold')
# If advanced, split into two columns -- messy since Matplotlib sucks! :(
if advanced:
for b in range(nboxes):
percol = floor(nboxes/2.0) # Number of boxes per column
col = floor(b/percol) # Which column to plot in
labelpos = list(check.labels[b].get_position()) # Get label positions and convert tuple -> list
rectpos = list(check.rectangles[b].get_xy()) # Likewise for rectangles
line0pos = check.lines[b][0].get_data() # There are two lines, and they have data
line1pos = check.lines[b][1].get_data()
yoffset = 0.5 # Specify amount to move everything in column 0 down by
xoffset = 0.45 # Specify amount to move everything on column 1 over by
if col==0: # Left column: shift everything down
labelpos[1] -= yoffset
rectpos[1] -= yoffset
for i in range(2): # Start and end points
line0pos[1][i] -= yoffset
line1pos[1][i] -= yoffset
else: # Right column: shift everything over
labelpos[0] += xoffset
rectpos[0] += xoffset
for i in range(2): # Start and end points
line0pos[0][i] += xoffset
line1pos[0][i] += xoffset
check.labels[b].set_position(labelpos) # Actually set positions
check.rectangles[b].set_xy(rectpos)
check.lines[b][0].set_data(line0pos)
check.lines[b][1].set_data(line1pos)
if advanced: advlabel = 'Normal'
else: advlabel = 'Advanced'
blue = (0.4,0.7,1.0) # Also green = (0.2,0.7,0.1), red = (1.0,0.5,0.1)
white = (1.0,1.0,1.0)
black = (0.4,0.4,0.4)
darker = 0.7
updatebutton = Button(updateaxes, 'Update', color=blue, hovercolor=tuple(array(blue)*darker))
clearbutton = Button(clearaxes, 'Clear', color=blue, hovercolor=tuple(array(blue)*darker))
defaultsbutton = Button(defaultsaxes, 'Defaults', color=blue, hovercolor=tuple(array(blue)*darker))
advancedbutton = Button(advancedaxes, advlabel, color=blue, hovercolor=tuple(array(blue)*darker))
closebutton = Button(closeaxes, 'Close', color=blue, hovercolor=tuple(array(blue)*darker))
plusbutton = Button(plusaxes, '+', color=white, hovercolor=tuple(array(white)*darker))
minusbutton = Button(minusaxes, '-', color=black, hovercolor=tuple(array(black)*darker))
updatebutton.on_clicked(updateplots) # Update figure if button is clicked
clearbutton.on_clicked(clearselections) # Clear all checkboxes
defaultsbutton.on_clicked(defaultselections) # Return to default selections
advancedbutton.on_clicked(advancedselections) # Return to default selections
closebutton.on_clicked(closegui) # Close figures
plusbutton.on_clicked(zoomin) # Zoom in on plots
minusbutton.on_clicked(zoomout) # Zoom in on plots
updateplots(None) # Plot initially -- ACTUALLY GENERATES THE PLOTS
return None
def manualfit(project=None, parsubset=None, name=-1, ind=0, maxrows=25, verbose=2, advanced=False, figargs=None, **kwargs):
'''
Create a GUI for doing manual fitting via the backend. Opens up three windows:
results, results selection, and edit boxes.
parsubset can be a list of parameters the user can fit, e.g.
parsubset=['initprev','force']
maxrows is the number of rows (i.e. parameters) to display in each column.
Note: to get advanced parameters and plots, set advanced=True.
Version: 1.2 (2017feb10)
'''
## Random housekeeping
global panel, results, origpars, tmppars, parset, fulllabellist, fullkeylist, fullsubkeylist, fulltypelist, fullvallist, globaladvanced
globaladvanced = advanced
if figargs is None: figargs = dict()
fig = figure(**figargs); close(fig) # Open and close figure...dumb, no? Otherwise get "QWidget: Must construct a QApplication before a QPaintDevice"
ion() # We really need this here!
nsigfigs = 4
boxes = []
texts = []
## Get the list of parameters that can be fitted
parset = dcp(project.parsets[name])
tmppars = parset.pars
origpars = dcp(tmppars)
mflists = parset.manualfitlists(parsubset=parsubset, advanced=globaladvanced)
fullkeylist = mflists['keys']
fullsubkeylist = mflists['subkeys']
fulltypelist = mflists['types']
fullvallist = mflists['values']
fulllabellist = mflists['labels']
nfull = len(fulllabellist) # The total number of boxes needed
results = project.runsim(name)
pygui(results, **kwargs)
def closewindows():
''' Close all three open windows '''
closegui()
panel.close()
## Define update step
def manualupdate():
''' Update GUI with new results '''
global results, tmppars, fullkeylist, fullsubkeylist, fulltypelist, fullvallist
# Update parameter values from GUI values
for b,box in enumerate(boxes):
fullvallist[b] = eval(str(box.text()))
# Create lists for update
mflists = odict()
mflists['keys'] = fullkeylist
mflists['subkeys'] = fullsubkeylist
mflists['types'] = fulltypelist
mflists['values'] = fullvallist
parset.update(mflists)
# Rerun
simparslist = parset.interp(start=project.settings.start, end=project.settings.end, dt=project.settings.dt)
results = project.runsim(simpars=simparslist)
updateplots(tmpresults=results, **kwargs)
## Keep the current parameters in the project; otherwise discard
def keeppars():
''' Little function to reset origpars and update the project '''
global origpars, tmppars, parset
origpars = dcp(tmppars)
parset.pars = tmppars
project.parsets[name].pars = tmppars
print('Parameters kept')
return None
def resetpars():
''' Reset the parameters to the last saved version -- WARNING, doesn't work '''
global origpars, tmppars, parset
tmppars = dcp(origpars)
parset.pars = tmppars
for i in range(nfull): boxes[i].setText(sigfig(fullvallist[i], sigfigs=nsigfigs))
simparslist = parset.interp(start=project.settings.start, end=project.settings.end, dt=project.settings.dt)
results = project.runsim(simpars=simparslist)
updateplots(tmpresults=results)
return None
## Set up GUI
npars = len(fullkeylist)
leftmargin = 10
rowheight = 25
colwidth = 450
ncols = floor(npars/(maxrows+10*advanced))+1
nrows = ceil(nfull/float(ncols))
panelwidth = colwidth*ncols
panelheight = rowheight*(nfull/ncols+2)+50
buttonheight = panelheight-rowheight*1.5
boxoffset = 300+leftmargin
panel = pyqt.QWidget() # Create panel widget
panel.setGeometry(100, 100, panelwidth, panelheight)
spottaken = [] # Store list of existing entries, to avoid duplicates
for i in range(nfull):
row = (i % nrows) + 1
col = floor(i/float(nrows))
spot = (row,col)
if spot in spottaken:
errormsg = 'Cannot add a button to %s since there already is one!' % str(spot)
raise OptimaException(errormsg)
else: spottaken.append(spot)
texts.append(pyqt.QLabel(parent=panel))
texts[-1].setText(fulllabellist[i])
texts[-1].move(leftmargin+colwidth*col, rowheight*row)
boxes.append(pyqt.QLineEdit(parent = panel)) # Actually create the text edit box
boxes[-1].move(boxoffset+colwidth*col, rowheight*row)
printv('Setting up GUI checkboxes: %s' % [i, fulllabellist[i], boxoffset+colwidth*col, rowheight*row], 4, verbose)
boxes[-1].setText(sigfig(fullvallist[i], sigfigs=nsigfigs))
boxes[-1].returnPressed.connect(manualupdate)
keepbutton = pyqt.QPushButton('Keep', parent=panel)
resetbutton = pyqt.QPushButton('Reset', parent=panel)
closebutton = pyqt.QPushButton('Close', parent=panel)
keepbutton.move(1*panelwidth/4, buttonheight)
resetbutton.move(2*panelwidth/4, buttonheight)
closebutton.move(3*panelwidth/4, buttonheight)
keepbutton.clicked.connect(keeppars)
resetbutton.clicked.connect(resetpars)
closebutton.clicked.connect(closewindows)
panel.show()
def plotpeople(project=None, people=None, tvec=None, ind=None, simind=None, start=2, end=None, pops=None, animate=False, skipempty=True, verbose=2, toplot=None, **kwargs):
'''
A function to plot all people as a stacked plot
"Exclude" excludes the first N health states -- useful for excluding susceptibles.
Usage example:
import optima as op
P = op.defaults.defaultproject('simple')
P.runsim()
people = P.results[-1].raw[0]['people']
op.plotpeople(P, people)
NB: for a multiresult, simind must not be None!
Version: 2018apr0
'''
if pops is None: pops = Ellipsis # This is a slice
elif isnumber(pops): pops = [pops]
if pops is not Ellipsis: plottitle = str(array(project.parsets[0].popkeys)[array(pops)])
else: plottitle = 'All populations'
legendsettings = {'loc':'upper left', 'bbox_to_anchor':(1.02, 1), 'fontsize':11, 'title':''}
nocolor = (0.9,0.9,0.9)
labels = project.settings.statelabels
if toplot is None: toplot = 'people'
if people is None:
if ind is None: ind=-1
try:
people = project.results[ind].raw[0][toplot] # Try to get default people to plot
except:
if simind is None: simind = 1
people = project.results[ind].raw[simind][0][toplot] # It's a multiresult: need another index
plotstyles = odict([
('susreg', ('|','|')),
('progcirc', ('+','|')),
('undx', ('O','o')),
('dx', ('.','o')),
('care', ('*','*')),
('lost', ('X','|')),
('usvl', ('.','o')),
('svl', ('*','*')),
])
hatchstyles = []
linestyles = []
for key in plotstyles.keys():
hatchstyles.extend([plotstyles[key][0] for lab in labels if lab.startswith(key)])
linestyles.extend([plotstyles[key][1] for lab in labels if lab.startswith(key)])
labels = labels[start:end]
hatchstyles = hatchstyles[start:end]
linestyles = linestyles[start:end]
ppl = people[start:end,:,:] # Exclude initial people
ppl = ppl[:,pops,:] # Filter selected populations
ppl = ppl[:,:,:].sum(axis=1) # Sum over people
ppl = transpose(ppl) # So time is plotted on x-axis
nstates = len(labels)
colors = gridcolors(nstates)
if tvec is None:
tvec = project.settings.maketvec() # Won't necessarily match this ppl, supply as argument if so
bottom = 0*tvec
makenewfigure(**kwargs)
ax = subplot(111)
ylabel('Number of people')
title(plottitle)
xlim((tvec[0], tvec[-1]))
for st in range(nstates-1,-1,-1):
this = ppl[:,st]
if sum(this):
thiscolor = colors[st]
haspeople = True
else:
thiscolor = nocolor
haspeople = False
if haspeople or not skipempty:
printv('State: %i/%i Hatch: %s Line: %s Color: %s' % (st, nstates, hatchstyles[st], linestyles[st], thiscolor), 4, verbose)
fill_between(tvec, bottom, this+bottom, facecolor=thiscolor, alpha=1, lw=0, hatch=hatchstyles[st])
bottom += this
# Legend stuff
ax.plot((0, 0), (0, 0), color=thiscolor, linewidth=10, label=labels[st], marker=linestyles[st]) # This loop is JUST for the legends! since fill_between doesn't count as a plot object... -- TODO: this is copied from plotepi(), perhaps streamline
handles, legendlabels = ax.get_legend_handles_labels()
legend(reversed(handles), reversed(legendlabels), **legendsettings)
if animate:
show()
pause(0.001)
return None
global plotparsbackbut, plotparsnextbut, plotparslider
def plotpars(parslist=None, start=None, end=None, verbose=2, rows=6, cols=5, figsize=(16,12), fontsize=8, die=True, **kwargs):
'''
A function to plot all parameters. 'pars' can be an odict or a list of pars odicts.
Version: 2016jan30
'''
from optima import Par, makesimpars, tic, toc
from numpy import array, vstack
import matplotlib.pyplot as plt
from matplotlib.widgets import Button, Slider
global position, plotparsbackbut, plotparsnextbut, plotparslider
position = 0
# In case the user tries to enter a project or parset -- TODO: make more flexible
tmp = parslist
try: parslist = tmp.parsets[-1].pars # If it's a project
except:
try: parslist = tmp.pars # If it's a parset
except: pass
parslist = promotetolist(parslist) # Convert to list
try:
for i in range(len(parslist)): parslist[i] = parslist[i].pars
except: pass # Assume it's in the correct form -- a list of pars odicts
allplotdata = []
for pars in parslist:
count = 0
simpars = makesimpars(pars, start=start, end=end)
tvec = simpars['tvec']
plotdata = array([['name','simpar','par_t', 'par_y']], dtype=object) # Set up array for holding plotting results
for i,key1 in enumerate(pars):
par = pars[key1]
if isinstance(par, Par):
if hasattr(par,'y'): pardata = par.y # TODO: consider adding par.m as well
elif hasattr(par,'p'): pardata = par.p # Population size
else: raise Exception('Parameter "%s" has neither a "y" nor a "p" attribute' % key1)
if hasattr(pardata, 'keys') and len(pardata.keys())>0: # Only ones that don't have a len are temp pars
nkeys = len(pardata.keys())
for k,key2 in enumerate(pardata.keys()):
if hasattr(par, 't'): t = par.t[key2]
else: t = tvec[0] # For a constant
count += 1
if nkeys==1: thissimpar = simpars[key1]
else: thissimpar = simpars[key1][k]
thisplot = array(['%3i. %s - %s' % (count-1, key1, key2), thissimpar, t, pardata[key2]], dtype=object)
if array(thissimpar).sum()==0: thisplot[0] += ' (zero)'
plotdata = vstack([plotdata, thisplot])
else:
t = tvec[0] # For a constant
count += 1
thisplot = array(['%3i. %s' % (count-1, key1), simpars[key1], t, pardata], dtype=object)
plotdata = vstack([plotdata, thisplot])
plotdata = plotdata[1:,:] # Remove header
allplotdata.append(plotdata)
## Do plotting
nplots = len(plotdata)
if any([len(pltd)!=nplots for pltd in allplotdata]):
printv('Warning, not all pars are the same length, only plotting first', 2, verbose)
allplotdata = allplotdata[0]
nperscreen = rows*cols
plotparsfig = plt.figure(facecolor=(0.9,0.9,0.9), figsize=figsize)
plt.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.95, wspace=0.3, hspace=0.4)
plotparsaxs = []
count = 0
for row in range(rows):
for col in range(cols):
count += 1
plotparsaxs.append(plotparsfig.add_subplot(rows, cols, count))
backframe = plotparsfig.add_axes([0.1, 0.03, 0.1, 0.03])
sliderframe = plotparsfig.add_axes([0.3, 0.03, 0.4, 0.03])
nextframe = plotparsfig.add_axes([0.8, 0.03, 0.1, 0.03])
plotparsbackbut = Button(backframe, 'Back')
plotparsnextbut = Button(nextframe, 'Next')
plotparslider = Slider(sliderframe, '', 0, nplots, valinit=0, valfmt='%d')
def updateb(event=None):
global position
position -= nperscreen
position = max(0,position)
position = min(nplots-nperscreen, position)
plotparslider.set_val(position)
def updaten(event=None):
global position
position += nperscreen
position = max(0,position)
position = min(nplots-nperscreen, position)
plotparslider.set_val(position)
def update(tmp=0):
global position, plotparslider
position = tmp
position = max(0,position)
position = min(nplots-nperscreen, position)
t = tic()
for i,ax in enumerate(plotparsaxs):
ax.cla()
for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fontsize)
ax.hold(True)
nplt = i+position
if nplt<nplots:
for pd,plotdata in enumerate(allplotdata):
try:
this = plotdata[nplt,:]
ax.set_title(this[0])
if isinstance(this[1], dict):
if len(this[1].keys())==1: this[1] = this[1][0] # Actually needs to be an odict
elif len(this[1].keys())>1: raise OptimaException('Expecting a number or an array or even an odict with one key, but got an odict with multiple keys (%s)' % this[0])
if isnumber(this[1]): ax.plot(tvec, 0*tvec+this[1])
elif len(this[1])==0: ax.set_title(this[0]+' is empty')
elif len(this[1])==1: ax.plot(tvec, 0*tvec+this[1])
elif len(this[1])==len(tvec): ax.plot(tvec, this[1])
else: pass # Population size, doesn't use control points
printv('Plot %i/%i...' % (i*len(allplotdata)+pd+1, len(plotparsaxs)*len(allplotdata)), 2, verbose)
except Exception as E:
if die: raise E
else: print('Plotting failed: %s' % repr(E))
try:
if not(hasattr(this[3],'__len__') and len(this[3])==0): ax.scatter(this[2],this[3])
except Exception: pass # print('Problem with "%s": "%s"' % (this[0], repr(E)))
if pd==len(allplotdata)-1: # Do this for the last plot only
ax.set_ylim((0,1.1*ax.get_ylim()[1]))
ax.set_xlim((tvec[0],tvec[-1]))
toc(t)
update()
plotparsbackbut.on_clicked(updateb)
plotparsnextbut.on_clicked(updaten)
plotparslider.on_changed(update)
return allplotdata
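# A hedged usage sketch for plotpars() above (not part of the original file): thanks to
# the try/except handling it accepts a Project, a parset, or a pars odict directly.
# It assumes plotpars is exposed at the package level like plotpeople; the names op and P
# are illustrative only, and the lines are commented out so nothing runs on import.
# import optima as op
# P = op.defaults.defaultproject('simple')
# allplotdata = op.plotpars(P)              # Plot every parameter of the last parset
# allplotdata = op.plotpars(P.parsets[-1])  # Equivalent: pass the parset itself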
def showplots(plots=None, figsize=None):
'''
This function can be used to show plots (in separate figure windows), independently
of generating them.
Example:
import optima as op
P = op.demo(0)
plot = plotcascade(results=P.result(), interactive=False)
op.showplots(plot) # Creates one plot
NOTE: This function is purely remedial; the same effect can be accomplished more easily via:
op.plotcascade(results=P.result(), interactive=True)
Version: 2017may29
'''
ion()
if figsize is None: figsize = (10,4)
reanimateplots(plots) # Reconnect the plots to the matplotlib backend so they can be rendered
nplots = len(plots)
figs = []
for p in range(nplots):
figs.append(figure(facecolor=(1,1,1),figsize=figsize))
thisfig = figs[p]
thisplot = plots[p].axes[0]
thisfig._axstack.add(thisfig._make_key(thisplot), thisplot) # Add a plot to the axis stack
thisplot.change_geometry(1, 1, 1) # Change geometry to be correct
orig = thisplot.get_position() # get the original position
widthfactor = 0.9
heightfactor = 0.9
pos2 = [orig.x0, orig.y0, orig.width*widthfactor, orig.height*heightfactor]
thisplot.set_position(pos2) # set a new position
if nplots>1: return figs
else: return figs[0] # Don't return a list if a single figure
def loadplot(filename=None):
'''
Load a plot from a file and reanimate it.
Example usage:
import optima as op
P = op.demo(0)
op.saveplots(P, toplot='cascade', filetype='fig')
Later:
cascadefig = op.loadplot('cascade.fig')
'''
ion() # Without this, it doesn't show up
fig = loadobj(filename)
reanimateplots(fig)
return fig
##############################################################################
### HELPER FUNCTIONS
##############################################################################
def makenewfigure(**figargs):
''' PyQt-specific function for creating a new figure, maximized to the screen size by default '''
global scrwid, scrhei
if 'figsize' not in figargs: figargs['figsize'] = (scrwid, scrhei)
if 'facecolor' not in figargs: figargs['facecolor'] = (1,1,1)
fig = figure(**figargs) # Create a figure based on supplied kwargs, if any
return fig
def closegui(event=None):
''' Close all GUI windows '''
global panelfig, plotfig
try: close(plotfig)
except: pass
try: close(panelfig)
except: pass
return None
def getchecked(check=None):
''' Return a list indicating whether or not each check box is checked '''
ischecked = []
for box in range(len(check.lines)): ischecked.append(check.lines[box][0].get_visible()) # Stupid way of figuring out if a box is ticked or not
return ischecked
def clearselections(event=None):
global check
for box in range(len(check.lines)):
for i in [0,1]: check.lines[box][i].set_visible(False)
updateplots()
return None
def defaultselections(event=None):
''' Reset to default options '''
global check, results, globaladvanced
plotselections = getplotselections(results, advanced=globaladvanced) # WARNING, assumes defaults don't change with advanced
for box,tf in enumerate(plotselections['defaults']):
if tf: # True if in defaults, false otherwise
for i in [0,1]: check.lines[box][i].set_visible(True) # Two lines...stupid
else:
for i in [0,1]: check.lines[box][i].set_visible(False)
updateplots()
return None
def advancedselections(event=None):
''' Toggle advanced options '''
global check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plotfig, panelfig, results, plotargs, globaladvanced
globaladvanced = not(globaladvanced) # Toggle
try: close(plotfig) # These work better here than calling closegui() directly
except: pass
try: close(panelfig)
except: pass
check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plotfig, panelfig, plotargs = [None]*10 # Clear the bejesus out of everything
pygui(results, advanced=globaladvanced)
print('Switching to/from advanced; if GUI hangs, press enter in console') # Unfortunately, this happens from time to time
pause(0.2) # Without this, it doesn't work...siiiigh
return None
def zoomplots(event=None, ratio=1.0):
''' Zoom in or out '''
global plotfig
for ax in plotfig.axes:
axpos = ax.get_position()
x0 = axpos.x0
x1 = axpos.x1
y0 = axpos.y0
y1 = axpos.y1
xdiff = x1-x0
ydiff = y1-y0
xchange = xdiff*(1-ratio)/2.0
ychange = ydiff*(1-ratio)/2.0
ax.set_position([x0+xchange, y0+ychange, xdiff*ratio, ydiff*ratio])
return None
def zoomin(event=None):
''' Zoom into plots '''
zoomplots(event=event, ratio=1.1)
return None
def zoomout(event=None):
''' Zoom out of plots '''
zoomplots(event=event, ratio=0.9)
return None
def updateplots(event=None, tmpresults=None, **kwargs):
''' Close current window if it exists and open a new one based on user selections '''
global plotfig, check, checkboxes, results, plotargs
if tmpresults is not None: results = tmpresults
# If figure exists, get size, then close it
if plotfig is None: plotfig = makenewfigure()
width,height = plotfig.get_size_inches(); close(plotfig) # Get current figure dimensions
# Get user selections
ischecked = getchecked(check)
toplot = array(checkboxes)[array(ischecked)].tolist() # Use logical indexing to get names to plot
# Do plotting
if sum(ischecked): # Don't do anything if no plots
plotfig = makenewfigure(num='Optima results', figsize=(width, height), facecolor=(1,1,1)) # Create figure with correct number of plots
for key in ['toplot','fig','figsize']: kwargs.pop(key, None) # Remove duplicated arguments if they exist
plotresults(results, toplot=toplot, fig=plotfig, figsize=(width, height), **plotargs)
return None
##############################################################################################################################
### GEOSPATIAL GUI
##############################################################################################################################
"""
GEOSPATIAL
This file defines everything needed for the Python GUI for geospatial analysis.
Version: 2017mar22
"""
from optima import Project, Portfolio, loadproj, saveobj, defaultobjectives, makegeospreadsheet, makegeoprojects
from time import time
global geoguiwindow, globalportfolio, globalobjectives
if 1: geoguiwindow, globalportfolio, globalobjectives = [None]*3
## Global options
budgetfactor = 1e6 # Conversion between screen and internal
prjext = '.prj'
prtext = '.prt'
##############################################################################################################################
## Define functions
##############################################################################################################################
def resetbudget():
''' Replace current displayed budget with default from portfolio '''
global globalportfolio, objectiveinputs
totalbudget = 0
for project in globalportfolio.projects.values():
totalbudget += sum(project.progsets[0].getdefaultbudget().values())
objectiveinputs['budget'].setText(str(totalbudget/budgetfactor))
return None
def warning(message, usegui=True):
''' The usegui kwarg allows this function to be used in both GUI and non-GUI contexts '''
global geoguiwindow
if usegui:
pyqt.QMessageBox.warning(geoguiwindow, 'Message', message)
else:
print(message)
def gui_loadproj():
''' Helper function to load a project, since used more than once '''
filepath = pyqt.QFileDialog.getOpenFileName(caption='Choose project file', filter='*'+prjext)
project = None
if filepath:
try: project = loadproj(filepath, verbose=0)
except Exception as E: print('Could not load file "%s": "%s"' % (filepath, repr(E)))
if type(project)==Project: return project
else: print('File "%s" is not an Optima project file' % filepath)
else:
print('No filepath provided')
return project
def gui_makesheet():
''' Create a geospatial spreadsheet template based on a project file '''
## 1. Load a project file
project = gui_loadproj() # Ask the user for a project file and load it
if project is None:
raise OptimaException('No project loaded.')
try: results = project.parsets[-1].getresults()
except: results = project.runsim(name=project.parsets[-1].name)
copies, ok = pyqt.QInputDialog.getText(geoguiwindow, 'GA Spreadsheet Parameter', 'How many variants of the chosen project do you want?')
try: copies = int(copies)
except: raise OptimaException('Input (number of project copies) cannot be converted into an integer.')
refyear, ok = pyqt.QInputDialog.getText(geoguiwindow, 'GA Spreadsheet Parameter', 'Select a reference year for which you have district data.')
try: refyear = int(refyear)
except: raise OptimaException('Input (reference year) cannot be converted into an integer.')
if refyear not in [int(x) for x in results.tvec]:
raise OptimaException("Input not within range of years used by aggregate project's last stored calibration.")
## 2. Get destination filename
spreadsheetpath = pyqt.QFileDialog.getSaveFileName(caption='Save geospatial spreadsheet file', filter='*.xlsx')
# 3. Generate and save spreadsheet
try:
makegeospreadsheet(project=project, filename=spreadsheetpath, copies=copies, refyear=refyear, verbose=2)
warning('Multi-project template saved to "%s".' % spreadsheetpath)
except:
warning('Error: Template not saved due to a workbook error!')
return None
def gui_makeproj():
''' Create a series of project files based on a seed file and a geospatial spreadsheet '''
project = gui_loadproj()
spreadsheetpath = pyqt.QFileDialog.getOpenFileName(caption='Choose geospatial spreadsheet', filter='*.xlsx')
destination = pyqt.QFileDialog.getExistingDirectory(caption='Choose output folder')
makegeoprojects(project=project, spreadsheetpath=spreadsheetpath, destination=destination)
warning('Created projects from spreadsheet')
return None
def gui_create(filepaths=None, portfolio=None, doadd=False):
''' Create a portfolio by selecting a list of projects; silently skip files that fail '''
global globalportfolio, projectslistbox, objectiveinputs
projectpaths = []
projectslist = []
if globalportfolio is None:
globalportfolio = Portfolio()
if not doadd:
globalportfolio = Portfolio()
projectslistbox.clear()
if doadd and portfolio is not None:
globalportfolio = portfolio
filepaths = pyqt.QFileDialog.getOpenFileNames(caption='Choose project files', filter='*'+prjext)
if filepaths:
if type(filepaths)==str: filepaths = [filepaths] # Convert to list
for filepath in filepaths:
tmpproj = None
try: tmpproj = loadproj(filepath, verbose=0)
except: print('Could not load file "%s"; moving on...' % filepath)
if tmpproj is not None:
if type(tmpproj)==Project:
projectslist.append(tmpproj)
projectpaths.append(filepath)
print('Project file "%s" loaded' % filepath)
else: print('File "%s" is not an Optima project file; moving on...' % filepath)
projectslistbox.addItems(projectpaths)
globalportfolio.addprojects(projectslist)
resetbudget() # And reset the budget
return None
def gui_addproj():
''' Add a project -- same as creating a portfolio except don't overwrite '''
gui_create(doadd=True)
resetbudget() # And reset the budget
return None
def gui_loadport():
''' Load an existing portfolio '''
global globalportfolio, projectslistbox
filepath = pyqt.QFileDialog.getOpenFileName(caption='Choose portfolio file', filter='*'+prtext)
tmpport = None
if filepath:
try: tmpport = loadobj(filepath, verbose=0)
except Exception as E:
warning('Could not load file "%s" because "%s"' % (filepath, repr(E)))
return None
if tmpport is not None:
if type(tmpport)==Portfolio:
globalportfolio = tmpport
projectslistbox.clear()
projectslistbox.addItems([proj.name for proj in globalportfolio.projects.values()])
print('Portfolio file "%s" loaded' % filepath)
else: print('File "%s" is not an Optima portfolio file' % filepath)
else:
warning('File path not provided. Portfolio not loaded.')
resetbudget() # And reset the budget
return None
def gui_rungeo():
''' Actually run geospatial analysis!!! '''
global globalportfolio, globalobjectives, objectiveinputs
starttime = time()
if globalobjectives is None:
globalobjectives = defaultobjectives()
globalobjectives['budget'] = 0.0 # Reset
for key in objectiveinputs.keys():
globalobjectives[key] = eval(str(objectiveinputs[key].text())) # Get user-entered values
globalobjectives['budget'] *= budgetfactor # Convert back to internal representation
BOCobjectives = dcp(globalobjectives)
try:
globalportfolio.genBOCs(objectives=BOCobjectives, maxtime=30, mc=0)
globalportfolio.runGA(objectives=globalobjectives, maxtime=30, reoptimize=True, mc=0, batch=True, verbose=2, die=False, strict=True)
except Exception as E:
warning('Geospatial analysis failed: %s' % repr(E))
warning('Geospatial analysis finished running; total time: %0.0f s' % (time() - starttime))
return None
def gui_plotgeo():
''' Actually plot geospatial analysis!!! '''
global globalportfolio
if globalportfolio is None:
warning('Please load a portfolio first')
return None
globalportfolio.plotBOCs(deriv=False)
return None
def gui_export():
''' Save the current results to Excel file '''
global globalportfolio
if type(globalportfolio)!=Portfolio: warning('Warning, must load portfolio first!')
# 1. Create a new file dialog to save this spreadsheet
filepath = pyqt.QFileDialog.getSaveFileName(caption='Save geospatial analysis results file', filter='*.xlsx')
# 2. Generate spreadsheet according to David's template to store these data
if filepath:
try:
globalportfolio.export(filename=filepath)
except Exception as E:
warning('Results export failed: %s' % repr(E))
warning('Results saved to "%s".' % filepath)
else:
warning('Filepath not supplied: %s' % filepath)
return None
def gui_saveport():
''' Save the current portfolio '''
global globalportfolio
filepath = pyqt.QFileDialog.getSaveFileName(caption='Save portfolio file', filter='*'+prtext)
saveobj(filepath, globalportfolio)
return None
def closewindow():
''' Close the control panel '''
global geoguiwindow
geoguiwindow.close()
return None
def geogui():
'''
Open the GUI for doing geospatial analysis.
Version: 2016jan23
'''
global geoguiwindow, globalportfolio, globalobjectives, objectiveinputs, projectslistbox, projectinfobox
globalportfolio = None
if globalobjectives is None:
globalobjectives = defaultobjectives()
globalobjectives['budget'] = 0.0 # Reset
## Set parameters
wid = 1200.0
hei = 600.0
top = 20
spacing = 40
left = 20.
## Housekeeping
fig = figure(); close(fig) # Open and close figure...dumb, no? Otherwise get "QWidget: Must construct a QApplication before a QPaintDevice"
geoguiwindow = pyqt.QWidget() # Create panel widget
geoguiwindow.setGeometry(100, 100, wid, hei)
geoguiwindow.setWindowTitle('Optima geospatial analysis')
##############################################################################################################################
## Define buttons
##############################################################################################################################
## Define buttons
buttons = odict()
buttons['makesheet'] = pyqt.QPushButton('Make geospatial spreadsheet from project', parent=geoguiwindow)
buttons['makeproj'] = pyqt.QPushButton('Auto-generate projects from spreadsheet', parent=geoguiwindow)
buttons['create'] = pyqt.QPushButton('Create portfolio from projects', parent=geoguiwindow)
buttons['add'] = pyqt.QPushButton('Add projects to portfolio', parent=geoguiwindow)
buttons['loadport'] = pyqt.QPushButton('Load existing portfolio', parent=geoguiwindow)
buttons['rungeo'] = pyqt.QPushButton('Run geospatial analysis', parent=geoguiwindow)
buttons['plotgeo'] = pyqt.QPushButton('Plot geospatial results', parent=geoguiwindow)
buttons['export'] = pyqt.QPushButton('Export results', parent=geoguiwindow)
buttons['saveport'] = pyqt.QPushButton('Save portfolio', parent=geoguiwindow)
buttons['close'] = pyqt.QPushButton('Close', parent=geoguiwindow)
## Define button functions
actions = odict()
actions['makesheet'] = gui_makesheet
actions['makeproj'] = gui_makeproj
actions['create'] = gui_create
actions['add'] = gui_addproj
actions['loadport'] = gui_loadport
actions['rungeo'] = gui_rungeo
actions['plotgeo'] = gui_plotgeo
actions['export'] = gui_export
actions['saveport'] = gui_saveport
actions['close'] = closewindow
## Set button locations
spacer = 0
for b,key in enumerate(buttons.keys()):
if key=='rungeo': spacer = 170
buttons[key].move(left, top+spacing*b+spacer)
## Connect buttons to their functions
for key in buttons.keys():
buttons[key].clicked.connect(actions[key])
##############################################################################################################################
## Define other objects
##############################################################################################################################
def updateprojectinfo():
global globalportfolio, projectslistbox, projectinfobox
ind = projectslistbox.currentRow()
project = globalportfolio.projects[ind]
projectinfobox.setText(repr(project))
return None
def removeproject():
global projectslistbox, projectinfobox, globalportfolio
ind = projectslistbox.currentRow()
globalportfolio.projects.pop(globalportfolio.projects.keys()[ind]) # Remove from portfolio
projectslistbox.takeItem(ind) # Remove from list
return None
## List of projects
projectslistlabel = pyqt.QLabel(parent=geoguiwindow)
projectslistlabel.setText('Projects in this portfolio:')
projectslistbox = pyqt.QListWidget(parent=geoguiwindow)
projectslistbox.verticalScrollBar()
projectslistbox.currentItemChanged.connect(updateprojectinfo)
buttons['remove'] = pyqt.QPushButton('Remove selected project from portfolio', parent=geoguiwindow)
buttons['remove'].clicked.connect(removeproject)
projectslistlabel.move(330,20)
projectslistbox.move(330, 40)
buttons['remove'].move(330, hei-40)
projectslistbox.resize(300, hei-100)
## Project info
projectsinfolabel = pyqt.QLabel(parent=geoguiwindow)
projectsinfolabel.setText('Information about the selected project:')
projectinfobox = pyqt.QTextEdit(parent=geoguiwindow)
projectinfobox.setReadOnly(True)
projectinfobox.verticalScrollBar()
projectsinfolabel.move(640,20)
projectinfobox.move(640, 40)
projectinfobox.resize(530, hei-100)
## Objectives
objectivetext = odict()
objectivetext['start'] = 'Start year:'
objectivetext['end'] = 'End year:'
objectivetext['budget'] = 'Total budget (mil.):'
objectivetext['deathweight'] = 'Deaths weight:'
objectivetext['inciweight'] = 'Infections weight:'
objectivetextobjs = odict()
for k,key in enumerate(objectivetext.keys()):
objectivetextobjs[key] = pyqt.QLabel(parent=geoguiwindow)
objectivetextobjs[key].setText(str(objectivetext[key]))
objectivetextobjs[key].move(left+10, 235+k*30)
objectiveinputs = odict()
for k,key in enumerate(objectivetext.keys()):
objectiveinputs[key] = pyqt.QLineEdit(parent=geoguiwindow)
objectiveinputs[key].setText(str(globalobjectives[key]))
objectiveinputs[key].move(left+120, 230+k*30)
objectiveinputs['budget'].setText(str(globalobjectives['budget']/budgetfactor)) # So right units
geoguiwindow.show()
|
optimamodel/Optima
|
optima/gui.py
|
Python
|
lgpl-3.0
| 48,120
|
# Django settings for babynames project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'data_monging', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'mark',
'PASSWORD': 'blah',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Detroit'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_p-f$nh91u39((laogm+tx^b37=$)g*023pk5tp!8bih74a0u!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'babynames.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'babynames.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
MarkNenadov/OntarioBabyNames
|
babynames/babynames/settings.py
|
Python
|
lgpl-3.0
| 5,393
|
# -*- coding: utf-8 -*-
"""The function module of dolfin"""
from dolfin.functions import multimeshfunction
from dolfin.functions import functionspace
from dolfin.functions import function
from dolfin.functions import constant
from dolfin.functions import expression
from dolfin.functions import specialfunctions
from .multimeshfunction import *
from .functionspace import *
from .function import *
from .constant import *
from .expression import *
from .specialfunctions import *
# NOTE: The automatic documentation system in DOLFIN requires to _not_ define
# NOTE: classes or functions within this file. Use separate modules for that
# NOTE: purpose.
__all__ = functionspace.__all__ + function.__all__ + constant.__all__ + \
expression.__all__ + specialfunctions.__all__ + \
multimeshfunction.__all__
|
FEniCS/dolfin
|
site-packages/dolfin/functions/__init__.py
|
Python
|
lgpl-3.0
| 827
|
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
class Usecase:
def __init__(self, file, **settings):
self.file = file
self.settings = {"profile_set": None, "material": None}
for key, value in settings.items():
self.settings[key] = value
def execute(self):
profiles = list(self.settings["profile_set"].MaterialProfiles or [])
profile = self.file.create_entity("IfcMaterialProfile", **{"Material": self.settings["material"]})
profiles.append(profile)
self.settings["profile_set"].MaterialProfiles = profiles
return profile
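# A hedged usage sketch (not part of the original file). The direct call mirrors the
# constructor and execute() defined above; the ifcopenshell.api.run dispatch shown last
# is an assumption about how usecases are normally invoked and may differ by version.
# All names are illustrative and the lines are commented out.
# import ifcopenshell
# ifc = ifcopenshell.file()
# material = ifc.create_entity("IfcMaterial", Name="ST01")
# profile_set = ifc.create_entity("IfcMaterialProfileSet")
# profile = Usecase(ifc, profile_set=profile_set, material=material).execute()
# # Or, assuming the standard API dispatcher:
# # import ifcopenshell.api
# # profile = ifcopenshell.api.run("material.add_profile", ifc,
# #                                profile_set=profile_set, material=material)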
|
IfcOpenShell/IfcOpenShell
|
src/ifcopenshell-python/ifcopenshell/api/material/add_profile.py
|
Python
|
lgpl-3.0
| 1,359
|
import coeffstore
import expressions
import caching
import libadjoint
from dolfin_adjoint import backend
if backend.__name__ == "dolfin":
import lusolver
# Create the adjointer, the central object that records the forward solve
# as it happens.
adjointer = libadjoint.Adjointer()
mem_checkpoints = set()
disk_checkpoints = set()
adj_variables = coeffstore.CoeffStore()
def adj_start_timestep(time=0.0):
'''Dolfin does not supply us with information about timesteps, and so more information
is required from the user for certain features. This function should be called at the
start of the time loop with the initial time (defaults to 0).
See also: :py:func:`dolfin_adjoint.adj_inc_timestep`
'''
if not backend.parameters["adjoint"]["stop_annotating"]:
adjointer.time.start(time)
def adj_inc_timestep(time=None, finished=False):
'''Dolfin does not supply us with information about timesteps, and so more information
is required from the user for certain features. This function should be called at
the end of the time loop with two arguments:
- :py:data:`time` -- the time at the end of the timestep just computed
- :py:data:`finished` -- whether this is the final timestep.
With this information, complex functional expressions using the :py:class:`Functional` class
can be used.
The finished argument is necessary because the final step of a functional integration must perform
additional calculations.
See also: :py:func:`dolfin_adjoint.adj_start_timestep`
'''
if not backend.parameters["adjoint"]["stop_annotating"]:
adj_variables.increment_timestep()
if time is not None:
adjointer.time.next(time)
if finished:
adjointer.time.finish()
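# A hedged usage sketch (not part of the original file), following the two docstrings
# above: call adj_start_timestep() once before the time loop and adj_inc_timestep() at
# the end of every step, flagging the final step. The names t, dt and T are illustrative
# assumptions; the lines are commented out so nothing runs on import.
# t, dt, T = 0.0, 0.1, 1.0
# adj_start_timestep(t)
# while t < T:
#     # ... annotate/solve the forward problem for this timestep ...
#     t += dt
#     adj_inc_timestep(time=t, finished=(t >= T))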
# A dictionary that saves the functionspaces of all checkpoint variables that have been saved to disk
checkpoint_fs = {}
function_names = set()
def adj_check_checkpoints():
adjointer.check_checkpoints()
def adj_reset_cache():
if backend.parameters["adjoint"]["debug_cache"]:
backend.info_blue("Resetting solver cache")
caching.assembled_fwd_forms.clear()
caching.assembled_adj_forms.clear()
caching.lu_solvers.clear()
caching.localsolvers.clear()
caching.pis_fwd_to_tlm.clear()
caching.pis_fwd_to_adj.clear()
if backend.__name__ == "dolfin":
lusolver.lu_solvers = [None] * len(lusolver.lu_solvers)
lusolver.adj_lu_solvers = [None] * len(lusolver.adj_lu_solvers)
def adj_html(*args, **kwargs):
'''This routine dumps the current state of the adjglobals.adjointer to an HTML visualisation.
Use it like:
- adj_html("forward.html", "forward") # for the equations recorded on the forward run
- adj_html("adjoint.html", "adjoint") # for the equations to be assembled on the adjoint run
'''
return adjointer.to_html(*args, **kwargs)
def adj_reset():
'''Forget all annotation, and reset the entire dolfin-adjoint state.'''
adjointer.reset()
expressions.expression_attrs.clear()
adj_variables.__init__()
function_names.__init__()
adj_reset_cache()
backend.parameters["adjoint"]["stop_annotating"] = False
# Map from FunctionSpace to LUSolver that has factorised the fsp mass matrix
fsp_lu = {}
|
pf4d/dolfin-adjoint
|
dolfin_adjoint/adjglobals.py
|
Python
|
lgpl-3.0
| 3,298
|
import unittest
from Tribler.community.market.core.ttl import Ttl
class TtlTestSuite(unittest.TestCase):
"""Ttl test cases."""
def setUp(self):
# Object creation
self.ttl = Ttl(0)
self.ttl2 = Ttl(2)
self.ttl3 = Ttl(2)
self.ttl4 = Ttl(1)
def test_init(self):
# Test for init validation
with self.assertRaises(ValueError):
Ttl(-100)
with self.assertRaises(ValueError):
Ttl('1')
def test_default(self):
# Test for default init
self.assertEqual(2, int(Ttl.default()))
def test_conversion(self):
# Test for conversions
self.assertEqual(0, int(self.ttl))
self.assertEqual(2, int(self.ttl2))
def test_make_hop(self):
# Test for make hop
self.assertEqual(2, int(self.ttl2))
self.ttl2.make_hop()
self.assertEqual(1, int(self.ttl2))
def test_is_alive(self):
# Test for is alive
self.assertTrue(self.ttl4.is_alive())
self.ttl4.make_hop()
self.assertFalse(self.ttl4.is_alive())
|
vandenheuvel/tribler
|
Tribler/Test/Community/Market/test_ttl.py
|
Python
|
lgpl-3.0
| 1,095
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Photo'
db.create_table(u'accounts_photo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('file', self.gf('django.db.models.fields.CharField')(max_length=64)),
('is_primary', self.gf('django.db.models.fields.BooleanField')(default=False)),
('tag', self.gf('django.db.models.fields.CharField')(max_length=16, db_index=True)),
))
db.send_create_signal(u'accounts', ['Photo'])
def backwards(self, orm):
# Deleting model 'Photo'
db.delete_table(u'accounts_photo')
models = {
u'accounts.photo': {
'Meta': {'object_name': 'Photo'},
'file': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'accounts.profile': {
'Meta': {'object_name': 'Profile'},
'age_group': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'chest': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'foot': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'hipline': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preferred_style': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'waistline': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
|
ygrass/handsome
|
accounts/migrations/0003_auto__add_photo.py
|
Python
|
unlicense
| 5,858
|
import pygame
import random
import item
import mob
import tile
class Mapgen(object):
def __init__(self, level):
self.xsiz = 10
self.ysiz = 10
self.biome = "random"
self.procedure = 0
self.zone = []
self.level = level
self.sizefactor = 2
#self.items = pygame.sprite.Group()
#self.mobs = pygame.sprite.Group()
#creates the base map
def generate(self,x,y,biome):
self.zone = []
self.xsiz = x
self.ysiz = y
self.biome = biome
self.sizefactor = (x/10)+(y/10)
landtype = 0
#for num in range(sizefactor*3):
# itemo = item.Item(self.level, self.level.items)
# itemo.set_type(random.randrange(6)+1)
#for umb in range(sizefactor*3):
# mobbo = mob.Mob(self.level, self.level.mobs)
# mobbo.set_type(random.randrange(7))
# mobbo.set_species(random.randrange(4)+1)
#main land generation
for a in range(x):
mapcol = []
for b in range(y):
#Purely Random
if (self.procedure == 0):
landtype = random.randrange(17)+1
#probability manipulation
if (self.procedure == 1):
if (biome == "grassland"):
common = [1,2,3,13]
uncommon = [4,5,6,7]
rare = [8,9,10]
vrare = [12,15]
self.level.passable = 1
if(biome == "forest"):
common = [3,4,5,9]
uncommon = [1,2,6]
rare = [7,13]
vrare = [10,11,12]
self.level.passable = 2
if(biome == "desert"):
common = [8,7]
uncommon = [16,17]
rare = [9,13]
vrare = [1,2]
self.level.passable = 7
landex = random.randrange(256)
if landex < 256:
landtype = random.choice(common)
if landex < 64:
landtype = random.choice(uncommon)
if landex < 16:
landtype = random.choice(rare)
if landex < 2:
landtype = random.choice(vrare)
#generate the tiles
acre = tile.Land(self.level, self.level.terrain)
if a == 0 or b == 0 or a == x-1 or b == y-1:
acre.set_type(0)
self.level.space.add(acre)
for mobbo in self.level.mobs:
mobbo.unpassable.add(acre)
else:
acre.set_type(landtype)
acre.get_image()
acre.spawn(a, b)
self.level.background.add(acre)
mapcol.append(acre)
self.zone.append( mapcol )
for a in range(len(self.zone)):
for b in range(len(self.zone[0])):
place = self.zone[a][b]
if place in self.level.space:
pass
else:
for wa in range(3):
for ha in range(3):
if a+wa-1 >= len(self.zone) or b+ha-1 >= len(self.zone[0]):
pass
else:
place.neighbors.add(self.zone[a+wa-1][b+ha-1])
return self.zone
#causes deserts to expand
def desertify(self):
for place in self.level.terrain:
place.desert_check()
#causes forests to grow
def grow_forest(self):
for place in self.level.terrain:
place.forest_check()
#lowers sea level
def sea_lower(self):
for place in self.level.terrain:
if place.flavnum == 15:
if random.randrange(100) < 80:
place.set_type(14)
if place.flavnum == 14:
if random.randrange(100) < 70:
place.set_type(13)
if place.flavnum == 13:
if random.randrange(100) < 60:
place.set_type(1)
#raises sea level
def sea_fill(self):
for place in self.level.terrain:
excepts = [0,15,14,12,11,10]
if place.flavnum == 15:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(14)
if place.flavnum == 14:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
if place.flavnum == 13:
for location in place.neighbors:
if random.randrange(100) < 10:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
#populates the map with mobs
def populate(self, density):
for a in range(self.sizefactor*density):
mobbo = mob.Mob(self.level, self.level.mobs)
mobbo.set_type(random.randrange(7))
mobbo.set_species(random.randrange(4)+1)
mobbo.unpassable.add(self.level.space)
mobbo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if mobbo.mapx == self.level.player1.mapx and mobbo.mapy == self.level.player1.mapy:
mobbo.kill()
#adds items to the map
def litter(self, density):
for a in range(self.sizefactor*density):
itemo = item.Item(self.level, self.level.items)
itemo.set_type(random.randrange(8))
itemo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if itemo.mapx == self.level.player1.mapx and itemo.mapy == self.level.player1.mapy:
itemo.kill()
#adds landmarks
def monumentalize(self, number):
for a in range(number):
monument = tile.Landmark(self.level, self.level.background)
monument.set_type(random.randrange(4))
monument.spawn(random.randrange(len(self.zone)-3)+1,random.randrange(len(self.zone[0])-3)+1)
pygame.sprite.spritecollide(monument, self.level.landmarks, True)
self.level.landmarks.add(monument)
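#A hedged usage sketch (not part of the original file): a plausible call order for the
#methods above, based on their comments. The 'level' object and the sprite groups it is
#expected to provide (terrain, background, space, mobs, items, landmarks, player1) are
#assumptions here, and the lines are commented out so nothing runs on import.
#mapper = Mapgen(level)
#mapper.generate(40, 30, "grassland") #lay down the base terrain
#mapper.grow_forest() #optionally let forests spread
#mapper.populate(2) #add mobs
#mapper.litter(2) #scatter items
#mapper.monumentalize(3) #place a few landmarks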
|
Lincoln-Cybernetics/Explore-
|
mapgen.py
|
Python
|
unlicense
| 7,161
|
import time
import os
import sys
import pickle
a=sys.argv[1]
q=open('facultysubject.txt','r')
w=open('facultydept.txt','r')
e=open('facultyprojects.txt','r')
dic1={}
dic2={}
dic3={}
dic1=pickle.load(q)
##print dic1.keys()
dic2=pickle.load(w)
##print dic2.keys()
dic3=pickle.load(e)
##print dic3.keys()
print 'NAME OF FACULTY: '+a+'\n'
print 'DETAILS AVAILABLE:- '+'\n'
print '              **************SEARCHING***************'
time.sleep(2)
if dic1.has_key(a)==True :
var1=dic1[a]
##print var1
var ='SUBJECT OF FACULTY: '+var1
print var+'\n'
if dic2.has_key(a)==True :
var2=dic2[a]
var3='DEPT. OF FACULTY: '+var2
print var3+'\n'
if dic3.has_key(a)==True :
var4=dic3[a]
var5='ONGOING PROJECTS: '+var4
print var5+'\n'
if dic1.has_key(a)==False and dic2.has_key(a)==False and dic3.has_key(a)==False :
print '              **************SEARCHING***************'
time.sleep(2)
print '            ........Sorry! No Matches Found.........'
|
gangulydebojyoti/Searching-inside-a-college-server
|
Searching_Faculty.py
|
Python
|
unlicense
| 928
|
import re
class Solution(object):
def decodeString(self, s):
"""
:type s: str
:rtype: str
"""
# Original idea by @StefanPochmann
r = re.compile(r"(\d+)\[([^\d\[\]]*)\]")
while r.search(s):
s = r.sub(lambda m: int(m.group(1)) * m.group(2), s)
return s
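# A hedged usage sketch (not part of the original file): the regex repeatedly expands
# the innermost k[...] groups until none remain. Kept as comments so nothing runs on
# import; the expected outputs follow from tracing the substitution by hand.
# assert Solution().decodeString("3[a]2[bc]") == "aaabcbc"
# assert Solution().decodeString("3[a2[c]]") == "accaccacc"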
|
stachenov/PyLeetCode
|
problems/decode_string.py
|
Python
|
unlicense
| 333
|
#!/usr/bin/env python3
# 574A_bear.py - Codeforces.com/problemset/problem/574/A Bear program by Sergey 2015
import unittest
import sys
###############################################################################
# Bear Class
###############################################################################
class Bear:
""" Bear representation """
def __init__(self, test_inputs=None):
""" Default constructor """
it = iter(test_inputs.split("\n")) if test_inputs else None
def uinput():
return next(it) if it else sys.stdin.readline().rstrip()
# Reading single elements
self.n = int(uinput())
# Reading a single line of multiple elements
self.nums = list(map(int, uinput().split()))
def calculate(self):
""" Main calcualtion function of the class """
lamak = self.nums[0]
srt = sorted(self.nums[1:])
result = 0
while lamak <= srt[-1]:
srt[-1] -= 1
lamak += 1
result += 1
srt = sorted(srt)
return str(result)
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_single_test(self):
""" Bear class testing """
# Constructor test
test = "5\n5 1 11 2 8"
d = Bear(test)
self.assertEqual(d.n, 5)
self.assertEqual(d.nums, [5, 1, 11, 2, 8])
# Sample test
self.assertEqual(Bear(test).calculate(), "4")
# Sample test
test = "4\n1 8 8 8"
self.assertEqual(Bear(test).calculate(), "6")
# Sample test
test = "2\n7 6"
self.assertEqual(Bear(test).calculate(), "0")
# My tests
test = "4\n0 1 1 1"
self.assertEqual(Bear(test).calculate(), "2")
# Time limit test
self.time_limit_test(100)
def time_limit_test(self, nmax):
""" Timelimit testing """
import random
import timeit
# Random inputs
test = str(nmax) + "\n"
test += "0 "
nums = [1000 for i in range(nmax-1)]
test += " ".join(map(str, nums)) + "\n"
# Run the test
start = timeit.default_timer()
d = Bear(test)
calc = timeit.default_timer()
d.calculate()
stop = timeit.default_timer()
print("\nTimelimit Test: " +
"{0:.3f}s (init {1:.3f}s calc {2:.3f}s)".
format(stop-start, calc-start, stop-calc))
if __name__ == "__main__":
# Avoiding recursion limitations
sys.setrecursionlimit(100000)
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
# Print the result string
sys.stdout.write(Bear().calculate())
|
snsokolov/contests
|
codeforces/574A_bear.py
|
Python
|
unlicense
| 2,847
|
from django import forms
from .models import Topic, Entry
class TopicForm(forms.ModelForm):
class Meta:
model = Topic
fields = ['text']
labels = {'text': ''}
class EntryForm(forms.ModelForm):
class Meta:
model = Entry
fields = ['text']
labels = {'text': ''}
widgets = {'text': forms.Textarea(attrs={'cols': 80})}
|
ivanxalie/learning_log
|
learning_logs/forms.py
|
Python
|
unlicense
| 394
|
from datetime import timedelta, datetime
from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
from dags.support import schemas
seven_days_ago = datetime.combine(datetime.today() - timedelta(7),
datetime.min.time())
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': seven_days_ago,
'email': ['alex@vanboxel.be'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=30),
}
with DAG('v1_8_bigquery', schedule_interval=timedelta(days=1),
default_args=default_args) as dag:
bq_extract_one_day = BigQueryOperator(
task_id='bq_extract_one_day',
bql='gcp_smoke/gsob_extract_day.sql',
destination_dataset_table=
'{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
write_disposition='WRITE_TRUNCATE',
bigquery_conn_id='gcp_smoke',
use_legacy_sql=False
)
bq2gcp_avro = BigQueryToCloudStorageOperator(
task_id='bq2gcp_avro',
source_project_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
destination_cloud_storage_uris=[
'gs://{{var.value.gcs_bucket}}/{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*.avro'
],
export_format='AVRO',
bigquery_conn_id='gcp_smoke',
)
bq2gcp_override = BigQueryToCloudStorageOperator(
task_id='bq2gcp_override',
source_project_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
destination_cloud_storage_uris=[
'gs://{{var.value.gcs_bucket}}/{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/99999999/part-*.avro'
],
export_format='AVRO',
bigquery_conn_id='gcp_smoke',
)
gcs2bq_avro_auto_schema = GoogleCloudStorageToBigQueryOperator(
task_id='gcs2bq_avro_auto_schema',
bucket='{{var.value.gcs_bucket}}',
source_objects=[
'{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*'
],
destination_project_dataset_table='{{var.value.gcq_tempset}}.avro_auto_schema{{ ds_nodash }}',
source_format='AVRO',
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
google_cloud_storage_conn_id='gcp_smoke',
bigquery_conn_id='gcp_smoke'
)
gcs2bq_avro_with_schema = GoogleCloudStorageToBigQueryOperator(
task_id='gcs2bq_avro_with_schema',
bucket='{{var.value.gcs_bucket}}',
source_objects=[
'{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*'
],
destination_project_dataset_table='{{var.value.gcq_tempset}}.avro_with_schema{{ ds_nodash }}',
source_format='AVRO',
schema_fields=schemas.gsob(),
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
google_cloud_storage_conn_id='gcp_smoke',
bigquery_conn_id='gcp_smoke'
)
bq_extract_one_day >> bq2gcp_avro >> bq2gcp_override
bq2gcp_avro >> gcs2bq_avro_auto_schema
bq2gcp_avro >> gcs2bq_avro_with_schema
|
alexvanboxel/airflow-gcp-examples
|
dags/bigquery.py
|
Python
|
apache-2.0
| 3,412
|
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_vlan
short_description: Manage VLAN resources in EOS
description:
- The eos_vlan module manages VLAN configurations on Arista EOS nodes.
version_added: 1.0.0
category: Bridging
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
vlanid:
description:
- The unique VLAN identifier associated with this resource. The value
for this identifier must be in the range of 1 to 4094.
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
name:
description:
- An ASCII string identifier for this VLAN. The default value for the
VLAN name is VLANxxxx where xxxx is the four digit VLAN ID.
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
enable:
description:
- Configures the administrative state for the VLAN. If enable is True
then the VLAN is administratively enabled. If enable is False then
the VLAN is administratively disabled.
default: true
required: false
choices: ['True', 'False']
aliases: []
version_added: 1.0.0
trunk_groups:
description:
- Configures the list of trunk groups associated with the VLAN in the
node configuration. The list of trunk groups is a comma separated
list. The default value for trunk_groups is an empty list.
- "Note: The list of comma delimited values must not include spaces."
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
"""
EXAMPLES = """
- name: ensures vlan 100 is configured
eos_vlan: vlanid=100 state=present
- name: ensures vlan 200 is not configured
eos_vlan: vlanid=200 state=absent
- name: configures the vlan name
eos_vlan: vlanid=1 name=TEST_VLAN_1
- name: configure trunk groups for vlan 10
eos_vlan: vlanid=10 trunk_groups=tg1,tg2,tg3
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosConnection(object):
__attributes__ = ['username', 'password', 'host', 'transport', 'port']
def __init__(self, **kwargs):
self.connection = kwargs['connection']
self.transport = kwargs.get('transport')
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.host = kwargs.get('host')
self.port = kwargs.get('port')
self.config = kwargs.get('config')
def connect(self):
if self.config is not None:
pyeapi.load_config(self.config)
config = dict()
if self.connection is not None:
config = pyeapi.config_for(self.connection)
if not config:
msg = 'Connection name "{}" not found'.format(self.connection)
for key in self.__attributes__:
if getattr(self, key) is not None:
config[key] = getattr(self, key)
if 'transport' not in config:
raise ValueError('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
node.enable('show version')
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
raise ValueError('unable to connect to {}'.format(node))
return node
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, autorefresh=False, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
## Ok, so in Ansible 2.0,
## AnsibleModule.__init__() sets self.params and then
## calls self.log()
## (through self._log_invocation())
##
## However, self.log() (overridden in EosAnsibleModule)
## references self._logging
## and self._logging (defined in EosAnsibleModule)
## references self.params.
##
## So ... I'm defining self._logging without "or self.params['logging']"
## *before* AnsibleModule.__init__() to avoid a "ref before def".
##
## I verified that this works with Ansible 1.9.4 and 2.0.0.2.
## The only caveat is that the first log message in
## AnsibleModule.__init__() won't be subject to the value of
## self.params['logging'].
self._logging = kwargs.get('logging')
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._autorefresh = autorefresh
self._node = EosConnection(**self.params)
self._node.connect()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argument_spec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
# After a create command, flush the running-config
# so we get the latest for any other attributes
self._node._running_config = None
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
func = self.func(self.desired_state)
changed = self.invoke(func, self)
self.result['changed'] = changed or True
self.refresh()
# By calling self.instance here we trigger another show running-config
# all which causes delay. Only if debug is enabled do we call this
# since it will display the latest state of the object.
if self._debug:
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
self.log('Creating connection with autorefresh=%s' % self._autorefresh)
node = pyeapi.client.Node(connection, autorefresh=self._autorefresh,
**config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, priority=syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, log_args=None, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Vlan based on vlanid
"""
vlanid = module.attributes['vlanid']
result = module.node.api('vlans').get(vlanid)
_instance = dict(vlanid=vlanid, state='absent')
if result:
_instance['state'] = 'present'
_instance['name'] = result['name']
_instance['enable'] = result['state'] == 'active'
_instance['trunk_groups'] = ','.join(result['trunk_groups'])
return _instance
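# Illustrative sketch of the dict returned above for a VLAN that exists on the
# node (values are hypothetical, not taken from a real device):
#   {'vlanid': '100', 'state': 'present', 'name': 'VLAN0100',
#    'enable': True, 'trunk_groups': 'tg1,tg2'}
# If the VLAN is not configured, only vlanid and state='absent' are returned.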
def create(module):
""" Creates a new instance of a Vlan on the node
"""
name = module.attributes['vlanid']
module.log('Invoked create for eos_vlan[%s]' % name)
module.node.api('vlans').create(name)
def remove(module):
name = module.attributes['vlanid']
module.log('Invoked remove for eos_vlan[%s]' % name)
module.node.api('vlans').delete(name)
def set_name(module):
""" Configures the name attribute for the vlan id
"""
value = module.attributes['name']
vlanid = module.attributes['vlanid']
module.log('Invoked set_name for eos_vlan[%s] '
'with value %s' % (vlanid, value))
module.node.api('vlans').set_name(vlanid, value)
def set_enable(module):
""" Configures the vlan as adminstratively enabled or disabled
"""
value = 'active' if module.attributes['enable'] else 'suspend'
vlanid = module.attributes['vlanid']
module.log('Invoked set_enable for eos_vlan[%s] '
'with value %s' % (vlanid, value))
module.node.api('vlans').set_state(vlanid, value)
def validate_trunk_groups(value):
""" Sorts the trunk groups passed into the playbook. This will ensure
idempotency since the API will return the trunk groups sorted.
"""
if not value:
return ''
trunk_groups = sorted(value.split(','))
return ','.join(trunk_groups)
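# For example (hypothetical playbook value), validate_trunk_groups('tg2,tg1')
# returns 'tg1,tg2', matching the sorted order the eAPI returns, so repeated
# playbook runs compare equal and stay idempotent.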
def set_trunk_groups(module):
""" Configures the list of trunk groups assigned to this Vlan
"""
value = module.attributes['trunk_groups'].split(',')
vlanid = module.attributes['vlanid']
module.log('Invoked set_trunk_groups for eos_vlan[%s] '
'with value %s' % (vlanid, value))
if value == ['']:
module.node.api('vlans').set_trunk_groups(vlanid, value, disable=True)
else:
module.node.api('vlans').set_trunk_groups(vlanid, value)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
vlanid=dict(required=True),
enable=dict(type='bool', default=True),
name=dict(),
trunk_groups=dict()
)
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main()
|
brutalic/pynet_brutal
|
class6/library/eos_vlan.py
|
Python
|
apache-2.0
| 18,361
|
# yellowbrick.features
# Visualizers for feature analysis and diagnostics.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Mon Oct 03 21:30:18 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: __init__.py [0f4b236] benjamin@bengfort.com $
"""
Visualizers for feature analysis and diagnostics.
"""
##########################################################################
## Imports
##########################################################################
## Hoist visualizers into the features namespace
from .pcoords import ParallelCoordinates, parallel_coordinates
from .radviz import RadialVisualizer, RadViz, radviz
from .rankd import Rank1D, rank1d, Rank2D, rank2d
from .jointplot import JointPlot, JointPlotVisualizer, joint_plot
from .pca import PCADecomposition, pca_decomposition
from .importances import FeatureImportances, feature_importances
from .rfecv import RFECV, rfecv
from .manifold import Manifold, manifold_embedding
|
pdamodaran/yellowbrick
|
yellowbrick/features/__init__.py
|
Python
|
apache-2.0
| 1,026
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TransactionTestCase
from myuw.models import VisitedLinkNew, CustomLink, PopularLink, User
from myuw.test import get_request_with_user
from myuw.dao.user import get_user_model
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.quicklinks import get_quicklink_data, get_link_label,\
add_custom_link, delete_custom_link, edit_custom_link,\
add_hidden_link, delete_hidden_link, get_popular_link_by_id,\
get_recent_link_by_id
class TestQuickLinkDAO(TransactionTestCase):
def test_recent_filtering(self):
def _get_recent(data):
recent = set()
for link in data['recent_links']:
recent.add(link['url'])
return recent
username = 'none'
req = get_request_with_user(username)
user = get_user_model(req)
u1 = 'http://example.com?q=1'
u2 = 'http://example.com?q=2'
v1 = VisitedLinkNew.objects.create(user=user, url=u1)
self.assertTrue(get_recent_link_by_id(req, v1.pk))
v2 = VisitedLinkNew.objects.create(user=user, url=u2)
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 2)
self.assertTrue(u1 in recent)
self.assertTrue(u2 in recent)
plink = PopularLink.objects.create(url=u2)
self.assertTrue(get_popular_link_by_id(plink.pk))
self.assertIsNotNone(plink.json_data())
self.assertIsNotNone(str(plink))
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 1)
self.assertTrue(u1 in recent)
CustomLink.objects.create(user=user, url=u1)
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 0)
for i in range(10):
VisitedLinkNew.objects.create(user=user,
url="http://example.com?q=%s" % i)
data = get_quicklink_data(req)
recent = _get_recent(data)
self.assertEquals(len(recent), 5)
def test_link_label_override(self):
req = get_request_with_user('none')
user = get_user_model(req)
data = {"user": user,
"url": "http://example.com?q=replaceit",
"label": "Original"}
l1 = VisitedLinkNew.objects.create(**data)
self.assertEquals(get_link_label(l1), "Row For Unit Tests")
l1 = VisitedLinkNew.objects.create(user=user,
url="http://example.com?q=whatever",
label="Original")
self.assertEquals(get_link_label(l1), "Original")
def test_hidden_link(self):
req = get_request_with_user('none')
url = "http://s.ss.edu"
link = add_hidden_link(req, url)
self.assertEquals(link.url, url)
# second time
link1 = add_hidden_link(req, url)
self.assertEquals(link.pk, link1.pk)
self.assertIsNotNone(delete_hidden_link(req, link.pk))
# second time
self.assertIsNone(delete_hidden_link(req, link.pk))
def test_add_custom_link(self):
username = 'none'
req = get_request_with_user(username)
link = add_custom_link(req, "http://s1.ss.edu")
self.assertIsNone(link.label)
url = "http://s.ss.edu"
link_label = "ss"
link1 = add_custom_link(req, url, link_label)
self.assertEquals(link1.url, url)
self.assertEquals(link1.label, link_label)
# second time
link2 = add_custom_link(req, url, link_label)
self.assertEquals(link2.pk, link1.pk)
def test_delete_custom_link(self):
username = 'none'
req = get_request_with_user(username)
url = "http://s.ss.edu"
link = add_custom_link(req, url)
self.assertIsNotNone(delete_custom_link(req, link.pk))
# second time
self.assertIsNone(delete_custom_link(req, link.pk))
def test_edit_custom_link(self):
username = 'none'
req = get_request_with_user(username)
url = "http://s.ss.edu"
link = add_custom_link(req, url)
url1 = "http://s1.ss.edu"
link1 = edit_custom_link(req, link.pk, url1)
self.assertEquals(link1.url, url1)
url2 = "http://s2.ss.edu"
label2 = "s2"
link2 = edit_custom_link(req, link1.pk, url2, label2)
self.assertIsNotNone(link2)
self.assertEquals(link2.label, label2)
def test_get_quicklink_data(self):
data = {
"affiliation": "student",
"url": "http://iss1.washington.edu/",
"label": "ISS1",
"campus": "seattle",
"pce": False,
"affiliation": "{intl_stud: True}",
}
plink = PopularLink.objects.create(**data)
username = "jinter"
req = get_request_with_user(username)
affiliations = get_all_affiliations(req)
user = get_user_model(req)
link_data = {
"user": user,
"url": "http://iss.washington.edu/",
"label": "ISS1",
"is_anonymous": False,
"is_student": affiliations.get('student', False),
"is_undegrad": affiliations.get('undergrad', False),
"is_grad_student": affiliations.get('grad', False),
"is_employee": affiliations.get('employee', False),
"is_faculty": affiliations.get('faculty', False),
"is_seattle": affiliations.get('seattle', False),
"is_tacoma": affiliations.get('tacoma', False),
"is_bothell": affiliations.get('bothell', False),
"is_pce": affiliations.get('pce', False),
"is_student_employee": affiliations.get('stud_employee',
False),
"is_intl_stud": affiliations.get('intl_stud', False)
}
l1 = VisitedLinkNew.objects.create(**link_data)
qls = get_quicklink_data(req)
self.assertEqual(qls['recent_links'][0]['label'], "ISS1")
self.assertEqual(qls['default_links'][0]['label'],
"International Student Services (ISS)")
def test_bot_quicklinks(self):
username = "botgrad"
req = get_request_with_user(username)
bot_qls = get_quicklink_data(req)
self.assertEqual(bot_qls['default_links'][0]['url'],
"http://www.uwb.edu/cie")
def test_tac_quicklinks(self):
username = "tacgrad"
req = get_request_with_user(username)
tac_qls = get_quicklink_data(req)
self.assertEqual(tac_qls['default_links'][0]['label'],
"International Student and Scholar Services (ISSS)")
def test_MUWM_4760(self):
req = get_request_with_user('bill')
data = get_quicklink_data(req)
self.assertTrue(data['instructor'])
self.assertTrue(data['sea_emp'])
self.assertFalse(data['student'])
req = get_request_with_user('javerage')
data = get_quicklink_data(req)
self.assertFalse(data['instructor'])
self.assertTrue(data['student'])
self.assertFalse(data['bot_student'])
self.assertFalse(data['tac_student'])
self.assertTrue(data['sea_student'])
self.assertTrue(data['sea_emp'])
self.assertFalse(data['bot_emp'])
self.assertFalse(data['tac_emp'])
req = get_request_with_user('jbothell')
data = get_quicklink_data(req)
self.assertTrue(data['student'])
self.assertTrue(data['bot_student'])
req = get_request_with_user('eight')
data = get_quicklink_data(req)
self.assertTrue(data['student'])
self.assertTrue(data['tac_student'])
self.assertTrue(data['instructor'])
self.assertTrue(data['sea_emp'])
|
uw-it-aca/myuw
|
myuw/test/dao/test_quicklinks.py
|
Python
|
apache-2.0
| 8,048
|
import tempfile
import synapse.common as s_common
import synapse.lib.msgpack as s_msgpack
_readsz = 10000000
def splice(act, **info):
'''
Form a splice event from a given act name and info.
Args:
act (str): The name of the action.
**info: Additional information about the event.
Example:
splice = splice('add:node', form='inet:ipv4', valu=0)
self.fire(splice)
Notes:
Splice events were reworked in v0.0.45 and now contain a sub-event of
the (act, info) under the 'mesg' key.
Returns:
(str, dict): The splice event.
'''
return (act, info)
def convertOldSplice(mesg):
'''
Converts an "old" splice event to the "new" format.
Args:
mesg ((str,dict)): An event tuple.
Examples:
Convert a splice to the new format:
newsplice = convertOldSplice(oldsplice)
Raises:
(BadSpliceMesg): The splice was unable to be converted.
Returns:
(str, dict): The splice event.
'''
if not (isinstance(mesg, tuple) and len(mesg) == 2):
raise s_common.BadSpliceMesg('invalid event mesg')
evtname = mesg[0]
if evtname != 'splice':
raise s_common.BadSpliceMesg('event mesg is not a splice')
data = mesg[1]
if data.get('mesg'):
raise s_common.BadSpliceMesg('splice has already been converted')
act = mesg[1].pop('act', None)
if not act:
raise s_common.BadSpliceMesg('splice is missing act')
return splice(act, **data)
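# Conversion sketch with a hypothetical payload: an old-style event such as
#   ('splice', {'act': 'add:node', 'form': 'inet:ipv4', 'valu': 0})
# becomes the new-style event
#   ('add:node', {'form': 'inet:ipv4', 'valu': 0})
# because the 'act' value is popped out and used as the event name.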
def convertSpliceFd(fpath):
'''
Converts an "old" splice log to the new format.
Args:
fpath (str): The path to the "old" splice log file.
Example:
convertSpliceFd('/stuff/oldsplicelog.mpk')
Notes:
This function reads an "old" splice log file, writes to a temporary
file, and then overwrites the old file with the new data. This function
only converts old splices to new splices. If any messages are invalid,
an exception will be raised and the conversion will exit early and not
overwrite any data.
Returns:
None
'''
with tempfile.SpooledTemporaryFile() as tmp:
with open(fpath, 'r+b') as fd:
for chnk in s_common.chunks(s_msgpack.iterfd(fd), 1000):
for mesg in chnk:
mesg = convertOldSplice(mesg)
tmp.write(s_msgpack.en(mesg))
tmp.seek(0)
fd.seek(0)
data = tmp.read(_readsz)
while data:
fd.write(data)
data = tmp.read(_readsz)
fd.truncate()
|
vivisect/synapse
|
synapse/lib/splice.py
|
Python
|
apache-2.0
| 2,652
|
"""The test for the bayesian sensor platform."""
import json
import unittest
from homeassistant.components.bayesian import binary_sensor as bayesian
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN
from homeassistant.setup import async_setup_component, setup_component
from tests.common import get_test_home_assistant
class TestBayesianBinarySensor(unittest.TestCase):
"""Test the threshold sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_load_values_when_added_to_hass(self):
"""Test that sensor initializes with observations of relevant entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
def test_unknown_state_does_not_influence_probability(self):
"""Test that an unknown state does not change the output probability."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
self.hass.states.set("sensor.test_monitored", STATE_UNKNOWN)
self.hass.block_till_done()
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations") == []
def test_sensor_numeric_state(self):
"""Test sensor on numeric state platform observations."""
config = {
"binary_sensor": {
"platform": "bayesian",
"name": "Test_Binary",
"observations": [
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored",
"below": 10,
"above": 5,
"prob_given_true": 0.6,
},
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored1",
"below": 7,
"above": 5,
"prob_given_true": 0.9,
"prob_given_false": 0.1,
},
],
"prior": 0.2,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", 6)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 6)
self.hass.states.set("sensor.test_monitored1", 6)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.6
assert state.attributes.get("observations")[1]["prob_given_true"] == 0.9
assert state.attributes.get("observations")[1]["prob_given_false"] == 0.1
assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", 6)
self.hass.states.set("sensor.test_monitored1", 0)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", 15)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.state == "off"
def test_sensor_state(self):
"""Test sensor on state platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
def test_sensor_value_template(self):
"""Test sensor on template platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{states('sensor.test_monitored') == 'off'}}",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
def test_threshold(self):
"""Test sensor on probability threshold limits."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "on",
"prob_given_true": 1.0,
}
],
"prior": 0.5,
"probability_threshold": 1.0,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(1.0 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
def test_multiple_observations(self):
"""Test sensor with multiple observations of same entity."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "blue",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
},
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "red",
"prob_given_true": 0.2,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
state = self.hass.states.get("binary_sensor.test_binary")
for key, attrs in state.attributes.items():
json.dumps(attrs)
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", "blue")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "blue")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", "blue")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "red")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(0.11 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
def test_probability_updates(self):
"""Test probability update function."""
prob_given_true = [0.3, 0.6, 0.8]
prob_given_false = [0.7, 0.4, 0.2]
prior = 0.5
for pt, pf in zip(prob_given_true, prob_given_false):
prior = bayesian.update_probability(prior, pt, pf)
assert round(abs(0.720000 - prior), 7) == 0
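# Worked sketch of how 0.72 arises (assuming update_probability implements
# Bayes' rule: posterior = p_t*prior / (p_t*prior + p_f*(1 - prior))):
#   0.5 -> 0.15/0.50 = 0.30 -> 0.18/0.46 ~= 0.391 -> 0.313/0.435 ~= 0.72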
prob_given_true = [0.8, 0.3, 0.9]
prob_given_false = [0.6, 0.4, 0.2]
prior = 0.7
for pt, pf in zip(prob_given_true, prob_given_false):
prior = bayesian.update_probability(prior, pt, pf)
assert round(abs(0.9130434782608695 - prior), 7) == 0
def test_observed_entities(self):
"""Test sensor on observed entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
{
"platform": "template",
"value_template": "{{is_state('sensor.test_monitored1','on') and is_state('sensor.test_monitored','off')}}",
"prob_given_true": 0.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored1", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("occurred_observation_entities")
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored"] == state.attributes.get(
"occurred_observation_entities"
)
self.hass.states.set("sensor.test_monitored1", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored", "sensor.test_monitored1"] == sorted(
state.attributes.get("occurred_observation_entities")
)
def test_state_attributes_are_serializable(self):
"""Test sensor on observed entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
{
"platform": "template",
"value_template": "{{is_state('sensor.test_monitored1','on') and is_state('sensor.test_monitored','off')}}",
"prob_given_true": 0.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored1", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("occurred_observation_entities")
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored"] == state.attributes.get(
"occurred_observation_entities"
)
self.hass.states.set("sensor.test_monitored1", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored", "sensor.test_monitored1"] == sorted(
state.attributes.get("occurred_observation_entities")
)
for key, attrs in state.attributes.items():
json.dumps(attrs)
async def test_template_error(hass, caplog):
"""Test sensor with template error."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{ xyz + 1 }}",
"prob_given_true": 0.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
assert "TemplateError" in caplog.text
assert "xyz" in caplog.text
async def test_update_request_with_template(hass):
"""Test sensor on template platform observations that gets an update request."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{states('sensor.test_monitored') == 'off'}}",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: "binary_sensor.test_binary"},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
async def test_update_request_without_template(hass):
"""Test sensor on template platform observations that gets an update request."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: "binary_sensor.test_binary"},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
async def test_monitored_sensor_goes_away(hass):
"""Test sensor on template platform observations that goes away."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "on",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "on"
hass.states.async_remove("sensor.test_monitored")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "on"
|
titilambert/home-assistant
|
tests/components/bayesian/test_binary_sensor.py
|
Python
|
apache-2.0
| 22,326
|
from synapse.tests.common import *
import synapse.lib.trees as s_trees
class TreeTest(SynTest):
def test_lib_tree_interval(self):
ivals = (
((-30, 50), {'name': 'foo'}),
((30, 100), {'name': 'bar'}),
((80, 100), {'name': 'baz'}),
)
itree = s_trees.IntervalTree(ivals)
#import pprint
#pprint.pprint(itree.root)
# test a multi-level overlap
names = [ival[1].get('name') for ival in itree.get(32)]
self.eq(names, ['foo', 'bar'])
# 90 ends up as a center in the tree...
names = [ival[1].get('name') for ival in itree.get(90)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on min
names = [ival[1].get('name') for ival in itree.get(80)]
self.eq(names, ['bar', 'baz'])
# test an exact overlap on max
names = [ival[1].get('name') for ival in itree.get(100)]
self.eq(names, ['bar', 'baz'])
self.eq(itree.get(-31), [])
self.eq(itree.get(101), [])
self.eq(itree.get(0xffffffff), [])
|
vivisect/synapse
|
synapse/tests/test_lib_trees.py
|
Python
|
apache-2.0
| 1,092
|
#!/usr/bin/env python2.5
#
# Unit tester for neural_net.py
#
import sys
from neural_net import train, test,\
make_neural_net_basic,\
make_neural_net_two_layer,\
make_neural_net_challenging,\
make_neural_net_with_weights
from neural_net_data import simple_data_sets,\
harder_data_sets,\
challenging_data_sets,\
manual_weight_data_sets,\
all_data_sets
def main(neural_net_func, data_sets, max_iterations=10000):
verbose = True
for name, training_data, test_data in data_sets:
print "-"*40
print "Training on %s data" %(name)
nn = neural_net_func()
train(nn, training_data, max_iterations=max_iterations,
verbose=verbose)
print "Trained weights:"
for w in nn.weights:
print "Weight '%s': %f"%(w.get_name(),w.get_value())
print "Testing on %s test-data" %(name)
result = test(nn, test_data, verbose=verbose)
print "Accuracy: %f"%(result)
if __name__=="__main__":
test_names = ["simple"]
if len(sys.argv) > 1:
test_names = sys.argv[1:]
for test_name in test_names:
if test_name == "simple":
# these test simple logical configurations
main(make_neural_net_basic,
simple_data_sets)
elif test_name == "two_layer":
# these test cases are slightly harder
main(make_neural_net_two_layer,
simple_data_sets + harder_data_sets)
elif test_name == "challenging":
# these tests require a more complex architecture.
main(make_neural_net_challenging, challenging_data_sets)
elif test_name == "patchy":
# The patchy problem is slightly tricky: unless your network gets the
# right weights, it can quickly get stuck in local maxima.
main(make_neural_net_challenging, manual_weight_data_sets)
elif test_name == "weights":
# if you set the 'right' weights for
# the patchy problem it can converge very quickly.
main(make_neural_net_with_weights, manual_weight_data_sets,100)
else:
print "unrecognized test name %s" %(test_name)
|
joshishungry/artificial_intel
|
assignments/lab5/neural_net_tester.py
|
Python
|
apache-2.0
| 2,227
|
# -*- coding: utf-8 -*-
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
googleapis/python-bigquery-storage
|
samples/snippets/__init__.py
|
Python
|
apache-2.0
| 601
|
#!/usr/bin/python
#
# convert .po to .js
#
import json
import optparse
import os
import polib
import re
import string
import sys
parser = optparse.OptionParser(usage="usage: %prog [options] pofile...")
parser.add_option("--callback", default="_.setTranslation", dest="callback", help="callback function to call with data")
parser.add_option("--quiet", action="store_false", default=True, dest="verbose", help="don't print status messages to stdout")
(options, args) = parser.parse_args()
if args == None or len(args) == 0:
print("ERROR: you must specify at least one po file to translate");
sys.exit(1)
paramFix = re.compile("(\\(([0-9])\\))")
for srcfile in args:
destfile = os.path.splitext(srcfile)[0] + ".js"
if options.verbose:
print("INFO: converting %s to %s" % (srcfile, destfile))
xlate_map = {}
po = polib.pofile(srcfile, autodetect_encoding=False, encoding="utf-8", wrapwidth=-1)
for entry in po:
if entry.obsolete or entry.msgstr == '':
continue
xlate_map[entry.msgid] = entry.msgstr;
dest = open(destfile, "w")
dest.write('i18n = ')
encoder = json.JSONEncoder()
for part in encoder.iterencode(xlate_map):
if part.startswith('"function('):
dest.write(part[1:-1]);
else:
dest.write(part);
dest.write(";\n")
dest.close()
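# Usage sketch with hypothetical inputs: "python po2js.py lang/de.po lang/fr.po"
# would write lang/de.js and lang/fr.js next to the input files, each assigning
# the translation map to the global "i18n" variable as emitted above.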
|
ToureNPlaner/tourenplaner-web
|
js/lang/po2js.py
|
Python
|
apache-2.0
| 1,277
|
import datetime
from ...place import Place
from ...spec import Spec
from .planet import Planet
from .dwarfplanet import DwarfPlanet
class System(Place):
"""Systems exist within galaxies, and can contain planets...
Attributes
allowedChildEntities Entity spec types that can be created from this context
spec Spec type of this Entity"""
# Things that child class SHOULDNT need to redeclare
# Things that a few child classes will need to redeclare
allowedChildEntities = [Spec.PLANET, Spec.DWARFPLANET]
# Things every child class will want to redeclare
spec = Spec.SYSTEM
# ---- Methods ---- #
def initEntityFromSpec(self, spec, key, path):
"""Attempt to initialize a specific entity using the spec type.
Will likely be redefined in Places.
Arguments
spec Spec type for new entity
key Key for new entity
path Path for new entity
Return
Entity"""
if (spec == spec.PLANET):
planet = Planet(key, path)
return planet
if (spec == spec.DWARFPLANET):
dwarfPlanet = DwarfPlanet(key, path)
return dwarfPlanet
raise ContextEntityConflictError("No matching child-entity for '" + self.getSpecString() + "' with spec " + spec.name)
|
Jerad-M/ubfs
|
classes/custom/place/system.py
|
Python
|
apache-2.0
| 1,201
|
# - # Copyright 2016 Max Fischer
# - #
# - # Licensed under the Apache License, Version 2.0 (the "License");
# - # you may not use this file except in compliance with the License.
# - # You may obtain a copy of the License at
# - #
# - # http://www.apache.org/licenses/LICENSE-2.0
# - #
# - # Unless required by applicable law or agreed to in writing, software
# - # distributed under the License is distributed on an "AS IS" BASIS,
# - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# - # See the License for the specific language governing permissions and
# - # limitations under the License.
"""
Instances of :py:class:`~.TwinMainModule` define how ``__main__`` is
bootstrapped in twinterpreters. A
:py:class:`~cpy2py.twinterpreter.twin_master.TwinMaster` without
a :py:class:`~.TwinMainModule` defaults to :py:data:`DEFAULT_DEF`.
Bootstrap the ``__main__`` module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``__main__`` module is the starting point of every python script or
application. Bootstrapping it into twinterpreters allows the recreation
of a similar environment without explicitly using :py:mod:`~cpy2py`.
In addition, smaller scripts may *only* consist of a
single script/module/file. Bootstrapping ``__main__`` gives twinterpreters
direct access to objects of this script. This is required to dispatch any
object defined only in the script.
Since ``__main__`` may have side effects and require adjustments for
different interpreters, it is not safe to execute it unconditionally.
The default is to bootstrap ``__main__`` as a module, which should be
safe for properly written applications.
Defining the ``__main__`` module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The parameter ``main_module`` identifies the ``__main__`` module for
lookup in a twinterpreter.
It may be specified explicitly, automatically detected or turned off.
**File Path**
Filesystem path to a module to execute, e.g. "/foo/bar.py". Will be
run like invoking "python /foo/bar.py". Useful for an absolute
definition of ``__main__`` across all twinterpreters.
**Module Name**
Python module name/path to a module to execute, e.g. "foo.bar". Will be
run like invoking "python -m foo.bar". Useful for interpreter-specific
definitions of ``__main__``, e.g. respecting different python versions.
:py:attr:`~.TwinMainModule.FETCH_PATH`
Use the file path of ``__main__``.
:py:attr:`~.TwinMainModule.FETCH_NAME`
Use the module name of ``__main__``.
:py:const:`True`
Try using :py:attr:`FETCH_NAME` if possible. Otherwise, use :py:attr:`FETCH_PATH`.
:py:const:`None`
Do not bootstrap ``__main__``.
Execution of the ``__main__`` module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Setting ``run_main`` defines whether ``__main__`` is actually executed as a script;
that is, it satisfies ``__name__ == "__main__"``. Active code in ``__main__`` should
usually be guarded against running when imported as a module.
In most cases, twinterpreters and other elements are likely created from active code
in ``__main__``. Rerunning this in twinterpreters would duplicate such elements. The
default is thus to avoid executing such code again.
However, the simplest method of creating the same environment across twinterpreters
is to have ``__main__`` be aware of whether it is run in the master or not. If this is
the case, setting ``run_main=True`` avoids the need to explicitly
use initializers/finalizers.
"""
import sys
import os
import runpy
import types
import logging
from cpy2py.kernel import state as kernel_state
class TwinMainModule(object):
"""
Definition on bootstrapping a program into a twinterpreter
:param main_module: module path, name or directive to fetch ``__main__``
:type main_module: str, bool or None
:param run_main: bootstrap ``__main__`` with ``__name__ == "__main__"``
:type run_main: bool
:param restore_argv: whether to replicate the parent ``sys.argv``
:type restore_argv: bool
When bootstrapping, two magic modules are created:
**__cpy2py_bootstrap__**
The bootstrapping module starting the cpy2py process.
**__cpy2py_main__**
The __main__ module loaded and started by cpy2py.
"""
FETCH_PATH = "Use __main__ via absolute path"
FETCH_NAME = "Use __main__ via module name"
#: alias of application __main__ module
app_main_alias = '__cpy2py_main__'
#: alias of cpy2py __main__ module
cpy2py_main_alias = '__cpy2py_bootstrap__'
def __init__(self, main_module=True, run_main=False, restore_argv=False):
self.main_module = main_module
self.run_main = run_main
self._argv = None # argv EXCLUDING first element (executable name)
# path[0] might get modified, but we fetch it now to stay consistent
self._main_path = os.path.abspath(sys.path[0])
self.restore_argv = restore_argv
self._logger = logging.getLogger('__cpy2py__.main.%s' % kernel_state.TWIN_ID)
def _resolve_main(self, main_module):
if main_module is True:
try:
return self._get_main_name()
except ValueError:
return os.path.abspath(sys.modules['__main__'].__file__)
if main_module == self.FETCH_PATH:
return os.path.abspath(sys.modules['__main__'].__file__)
elif main_module == self.FETCH_NAME:
return self._get_main_name()
return main_module
def _get_main_name(self):
"""
Get the module name of ``__main__``
:raises: :py:exc:`ValueError` if ``__main__`` does not provide its name
:returns: full module/package name of ``__main__``
"""
main = sys.modules['__main__']
try:
return main.__spec__.name
except AttributeError:
pass
try:
package, name = main.__package__, os.path.splitext(os.path.basename(main.__file__))[0]
if package is None and os.path.abspath(os.path.dirname(main.__file__)) != os.path.abspath(self._main_path):
raise AttributeError
except AttributeError:
raise ValueError("Cannot derive module name if __main__ not run as module/package (see 'python -m')")
else:
return (package + '.' + name) if package else name
def __getstate__(self):
# Issue#12: Twinterpreters cannot reference __main__ objects
# Twins refer to the main module via module name aliases, so
# the master needs the same aliases. We create them before
# spawning the first twin, which requires pickling us.
if self.app_main_alias not in sys.modules:
self._bootstrap_none()
return {
'main_module': self._resolve_main(self.main_module),
'run_main': self.run_main,
'restore_argv': self.restore_argv,
'_argv': sys.argv[1:] if self.restore_argv else [],
'_main_path': self._main_path,
}
def __setstate__(self, state): # pragma: no cover bootstrap
self.__dict__.update(state)
self._logger = logging.getLogger('__cpy2py__.main.%s' % kernel_state.TWIN_ID)
def bootstrap(self): # pragma: no cover bootstrap
"""Bootstrap the parent main environment into the current process"""
# all of these are set by unpickling in a spawning child process
assert self.main_module != self.FETCH_NAME and self.main_module != self.FETCH_PATH and self._argv is not None,\
"Cannot bootstrap sys.argv in initial environment"
self._logger.warning('<%s> Bootstrapping __main__ via %r', kernel_state.TWIN_ID, self)
# until now, path[0] is the cpy2py bootstrapper dir
# replace it with the dir that main resided in, if any
sys.path[0] = self._main_path
if self.restore_argv:
self._logger.info('<%s> Restoring sys.argv', kernel_state.TWIN_ID)
sys.argv[1:] = self._argv[:]
if self.main_module is None:
self._bootstrap_none()
elif os.path.exists(self.main_module):
self._bootstrap_path(str(self.main_module))
else:
self._bootstrap_name(str(self.main_module))
def _bootstrap_none(self):
self._logger.info(
'<%s> Aliasing __main__ module to %r, %r', kernel_state.TWIN_ID, self.app_main_alias, self.cpy2py_main_alias
)
sys.modules[self.app_main_alias] = sys.modules[self.cpy2py_main_alias] = sys.modules['__main__']
def _bootstrap_path(self, main_path): # pragma: no cover bootstrap
# ipython - see https://github.com/ipython/ipython/issues/4698
# utrunner - PyCharm unittests
if os.path.splitext(os.path.basename(main_path))[0] in ('ipython', 'utrunner'):
return self._bootstrap_none()
main_name = '__main__' if self.run_main else self.app_main_alias
self._logger.info('<%s> Loading __main__ module as %s from path %r', kernel_state.TWIN_ID, main_name, main_path)
main_dict = runpy.run_path(main_path, run_name=main_name)
self._bootstrap_set_main(main_dict)
def _bootstrap_name(self, mod_name): # pragma: no cover bootstrap
# guard against running __main__ files of packages
if not self.run_main and (mod_name == "__main__" or mod_name.endswith(".__main__")):
return self._bootstrap_none()
main_name = '__main__' if self.run_main else self.app_main_alias
self._logger.info('<%s> Loading __main__ module as %s from name %r', kernel_state.TWIN_ID, main_name, mod_name)
main_dict = runpy.run_module(mod_name, run_name=main_name)
self._bootstrap_set_main(main_dict)
def _bootstrap_set_main(self, main_dict): # pragma: no cover bootstrap
sys.modules[self.cpy2py_main_alias] = sys.modules['__main__']
main_module = types.ModuleType(self.app_main_alias)
main_module.__dict__.update(main_dict)
sys.modules['__main__'] = sys.modules[self.app_main_alias] = main_module
def __repr__(self):
return "%s(main_module=%r, run_main=%r, restore_argv=%r)" % (
self.__class__.__name__, self.main_module, self.run_main, self.restore_argv
)
def __eq__(self, other):
try:
return self.main_module == other.main_module\
and self.run_main == other.run_main
except AttributeError:
return NotImplemented
def __ne__(self, other):
return not self == other
#: The default :py:class:`TwinMainModule` instance to use
DEFAULT_DEF = TwinMainModule()
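# Minimal usage sketch (hypothetical; a real setup would hand this object to a
# cpy2py TwinMaster instead of relying on DEFAULT_DEF):
#   main_def = TwinMainModule(main_module=TwinMainModule.FETCH_NAME,
#                             run_main=False, restore_argv=True)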
|
maxfischer2781/cpy2py
|
cpy2py/twinterpreter/main_module.py
|
Python
|
apache-2.0
| 10,585
|
from __future__ import unicode_literals
import json
import xmltodict
from jinja2 import Template
from six import iteritems
from moto.core.responses import BaseResponse
from .models import redshift_backends
def convert_json_error_to_xml(json_error):
error = json.loads(json_error)
code = error["Error"]["Code"]
message = error["Error"]["Message"]
template = Template(
"""
<RedshiftClientError>
<Error>
<Code>{{ code }}</Code>
<Message>{{ message }}</Message>
<Type>Sender</Type>
</Error>
<RequestId>6876f774-7273-11e4-85dc-39e55ca848d1</RequestId>
</RedshiftClientError>"""
)
return template.render(code=code, message=message)
def itemize(data):
"""
xmltodict.unparse requires that we modify the shape of the input dictionary slightly. Instead of a dict of the form:
{'key': ['value1', 'value2']}
We must provide:
{'key': {'item': ['value1', 'value2']}}
"""
if isinstance(data, dict):
ret = {}
for key in data:
ret[key] = itemize(data[key])
return ret
elif isinstance(data, list):
return {"item": [itemize(value) for value in data]}
else:
return data
class RedshiftResponse(BaseResponse):
@property
def redshift_backend(self):
return redshift_backends[self.region]
def get_response(self, response):
if self.request_json:
return json.dumps(response)
else:
xml = xmltodict.unparse(itemize(response), full_document=False)
if hasattr(xml, "decode"):
xml = xml.decode("utf-8")
return xml
def call_action(self):
status, headers, body = super(RedshiftResponse, self).call_action()
if status >= 400 and not self.request_json:
body = convert_json_error_to_xml(body)
return status, headers, body
def unpack_complex_list_params(self, label, names):
unpacked_list = list()
count = 1
while self._get_param("{0}.{1}.{2}".format(label, count, names[0])):
param = dict()
for i in range(len(names)):
param[names[i]] = self._get_param(
"{0}.{1}.{2}".format(label, count, names[i])
)
unpacked_list.append(param)
count += 1
return unpacked_list
def unpack_list_params(self, label):
unpacked_list = list()
count = 1
while self._get_param("{0}.{1}".format(label, count)):
unpacked_list.append(self._get_param("{0}.{1}".format(label, count)))
count += 1
return unpacked_list
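# Sketch of how the unpackers above read AWS-style query parameters
# (hypothetical values): given Tags.Tag.1.Key=env, Tags.Tag.1.Value=dev,
# Tags.Tag.2.Key=team, Tags.Tag.2.Value=data,
# unpack_complex_list_params("Tags.Tag", ("Key", "Value")) returns
# [{"Key": "env", "Value": "dev"}, {"Key": "team", "Value": "data"}],
# and unpack_list_params stops at the first missing numeric index.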
def _get_cluster_security_groups(self):
cluster_security_groups = self._get_multi_param("ClusterSecurityGroups.member")
if not cluster_security_groups:
cluster_security_groups = self._get_multi_param(
"ClusterSecurityGroups.ClusterSecurityGroupName"
)
return cluster_security_groups
def _get_vpc_security_group_ids(self):
vpc_security_group_ids = self._get_multi_param("VpcSecurityGroupIds.member")
if not vpc_security_group_ids:
vpc_security_group_ids = self._get_multi_param(
"VpcSecurityGroupIds.VpcSecurityGroupId"
)
return vpc_security_group_ids
def _get_iam_roles(self):
iam_roles = self._get_multi_param("IamRoles.member")
if not iam_roles:
iam_roles = self._get_multi_param("IamRoles.IamRoleArn")
return iam_roles
def _get_subnet_ids(self):
subnet_ids = self._get_multi_param("SubnetIds.member")
if not subnet_ids:
subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
return subnet_ids
def create_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"node_type": self._get_param("NodeType"),
"master_username": self._get_param("MasterUsername"),
"master_user_password": self._get_param("MasterUserPassword"),
"db_name": self._get_param("DBName"),
"cluster_type": self._get_param("ClusterType"),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"availability_zone": self._get_param("AvailabilityZone"),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"port": self._get_int_param("Port"),
"cluster_version": self._get_param("ClusterVersion"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"number_of_nodes": self._get_int_param("NumberOfNodes"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"region_name": self.region,
"tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
"iam_roles_arn": self._get_iam_roles(),
"enhanced_vpc_routing": self._get_param("EnhancedVpcRouting"),
"kms_key_id": self._get_param("KmsKeyId"),
}
cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json()
cluster["ClusterStatus"] = "creating"
return self.get_response(
{
"CreateClusterResponse": {
"CreateClusterResult": {"Cluster": cluster},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def restore_from_cluster_snapshot(self):
enhanced_vpc_routing = self._get_bool_param("EnhancedVpcRouting")
restore_kwargs = {
"snapshot_identifier": self._get_param("SnapshotIdentifier"),
"cluster_identifier": self._get_param("ClusterIdentifier"),
"port": self._get_int_param("Port"),
"availability_zone": self._get_param("AvailabilityZone"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"region_name": self.region,
"iam_roles_arn": self._get_iam_roles(),
}
if enhanced_vpc_routing is not None:
restore_kwargs["enhanced_vpc_routing"] = enhanced_vpc_routing
cluster = self.redshift_backend.restore_from_cluster_snapshot(
**restore_kwargs
).to_json()
cluster["ClusterStatus"] = "creating"
return self.get_response(
{
"RestoreFromClusterSnapshotResponse": {
"RestoreFromClusterSnapshotResult": {"Cluster": cluster},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_clusters(self):
cluster_identifier = self._get_param("ClusterIdentifier")
clusters = self.redshift_backend.describe_clusters(cluster_identifier)
return self.get_response(
{
"DescribeClustersResponse": {
"DescribeClustersResult": {
"Clusters": [cluster.to_json() for cluster in clusters]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def modify_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"new_cluster_identifier": self._get_param("NewClusterIdentifier"),
"node_type": self._get_param("NodeType"),
"master_user_password": self._get_param("MasterUserPassword"),
"cluster_type": self._get_param("ClusterType"),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"cluster_version": self._get_param("ClusterVersion"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"number_of_nodes": self._get_int_param("NumberOfNodes"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"iam_roles_arn": self._get_iam_roles(),
"enhanced_vpc_routing": self._get_param("EnhancedVpcRouting"),
}
cluster_kwargs = {}
# We only want parameters that were actually passed in, otherwise
# we'll stomp all over our cluster metadata with None values.
for (key, value) in iteritems(request_kwargs):
if value is not None and value != []:
cluster_kwargs[key] = value
cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
return self.get_response(
{
"ModifyClusterResponse": {
"ModifyClusterResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"final_cluster_snapshot_identifier": self._get_param(
"FinalClusterSnapshotIdentifier"
),
"skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot"),
}
cluster = self.redshift_backend.delete_cluster(**request_kwargs)
return self.get_response(
{
"DeleteClusterResponse": {
"DeleteClusterResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_cluster_subnet_group(self):
cluster_subnet_group_name = self._get_param("ClusterSubnetGroupName")
description = self._get_param("Description")
subnet_ids = self._get_subnet_ids()
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
subnet_group = self.redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=cluster_subnet_group_name,
description=description,
subnet_ids=subnet_ids,
region_name=self.region,
tags=tags,
)
return self.get_response(
{
"CreateClusterSubnetGroupResponse": {
"CreateClusterSubnetGroupResult": {
"ClusterSubnetGroup": subnet_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_subnet_groups(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(
subnet_identifier
)
return self.get_response(
{
"DescribeClusterSubnetGroupsResponse": {
"DescribeClusterSubnetGroupsResult": {
"ClusterSubnetGroups": [
subnet_group.to_json() for subnet_group in subnet_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_subnet_group(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
return self.get_response(
{
"DeleteClusterSubnetGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def create_cluster_security_group(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
description = self._get_param("Description")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
security_group = self.redshift_backend.create_cluster_security_group(
cluster_security_group_name=cluster_security_group_name,
description=description,
region_name=self.region,
tags=tags,
)
return self.get_response(
{
"CreateClusterSecurityGroupResponse": {
"CreateClusterSecurityGroupResult": {
"ClusterSecurityGroup": security_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_security_groups(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
security_groups = self.redshift_backend.describe_cluster_security_groups(
cluster_security_group_name
)
return self.get_response(
{
"DescribeClusterSecurityGroupsResponse": {
"DescribeClusterSecurityGroupsResult": {
"ClusterSecurityGroups": [
security_group.to_json()
for security_group in security_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_security_group(self):
security_group_identifier = self._get_param("ClusterSecurityGroupName")
self.redshift_backend.delete_cluster_security_group(security_group_identifier)
return self.get_response(
{
"DeleteClusterSecurityGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def authorize_cluster_security_group_ingress(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
cidr_ip = self._get_param("CIDRIP")
security_group = self.redshift_backend.authorize_cluster_security_group_ingress(
cluster_security_group_name, cidr_ip
)
return self.get_response(
{
"AuthorizeClusterSecurityGroupIngressResponse": {
"AuthorizeClusterSecurityGroupIngressResult": {
"ClusterSecurityGroup": {
"ClusterSecurityGroupName": cluster_security_group_name,
"Description": security_group.description,
"IPRanges": [
{
"Status": "authorized",
"CIDRIP": cidr_ip,
"Tags": security_group.tags,
},
],
}
}
}
}
)
def create_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
group_family = self._get_param("ParameterGroupFamily")
description = self._get_param("Description")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
parameter_group = self.redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name, group_family, description, self.region, tags
)
return self.get_response(
{
"CreateClusterParameterGroupResponse": {
"CreateClusterParameterGroupResult": {
"ClusterParameterGroup": parameter_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_parameter_groups(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(
cluster_parameter_group_name
)
return self.get_response(
{
"DescribeClusterParameterGroupsResponse": {
"DescribeClusterParameterGroupsResult": {
"ParameterGroups": [
parameter_group.to_json()
for parameter_group in parameter_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
self.redshift_backend.delete_cluster_parameter_group(
cluster_parameter_group_name
)
return self.get_response(
{
"DeleteClusterParameterGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def create_cluster_snapshot(self):
cluster_identifier = self._get_param("ClusterIdentifier")
snapshot_identifier = self._get_param("SnapshotIdentifier")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.redshift_backend.create_cluster_snapshot(
cluster_identifier, snapshot_identifier, self.region, tags
)
return self.get_response(
{
"CreateClusterSnapshotResponse": {
"CreateClusterSnapshotResult": {"Snapshot": snapshot.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_snapshots(self):
cluster_identifier = self._get_param("ClusterIdentifier")
snapshot_identifier = self._get_param("SnapshotIdentifier")
snapshots = self.redshift_backend.describe_cluster_snapshots(
cluster_identifier, snapshot_identifier
)
return self.get_response(
{
"DescribeClusterSnapshotsResponse": {
"DescribeClusterSnapshotsResult": {
"Snapshots": [snapshot.to_json() for snapshot in snapshots]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_snapshot(self):
snapshot_identifier = self._get_param("SnapshotIdentifier")
snapshot = self.redshift_backend.delete_cluster_snapshot(snapshot_identifier)
return self.get_response(
{
"DeleteClusterSnapshotResponse": {
"DeleteClusterSnapshotResult": {"Snapshot": snapshot.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_snapshot_copy_grant(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName"),
"kms_key_id": self._get_param("KmsKeyId"),
"region_name": self._get_param("Region"),
}
copy_grant = self.redshift_backend.create_snapshot_copy_grant(
**copy_grant_kwargs
)
return self.get_response(
{
"CreateSnapshotCopyGrantResponse": {
"CreateSnapshotCopyGrantResult": {
"SnapshotCopyGrant": copy_grant.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_snapshot_copy_grant(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName")
}
self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs)
return self.get_response(
{
"DeleteSnapshotCopyGrantResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def describe_snapshot_copy_grants(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName")
}
copy_grants = self.redshift_backend.describe_snapshot_copy_grants(
**copy_grant_kwargs
)
return self.get_response(
{
"DescribeSnapshotCopyGrantsResponse": {
"DescribeSnapshotCopyGrantsResult": {
"SnapshotCopyGrants": [
copy_grant.to_json() for copy_grant in copy_grants
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_tags(self):
resource_name = self._get_param("ResourceName")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
self.redshift_backend.create_tags(resource_name, tags)
return self.get_response(
{
"CreateTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def describe_tags(self):
resource_name = self._get_param("ResourceName")
resource_type = self._get_param("ResourceType")
tagged_resources = self.redshift_backend.describe_tags(
resource_name, resource_type
)
return self.get_response(
{
"DescribeTagsResponse": {
"DescribeTagsResult": {"TaggedResources": tagged_resources},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_tags(self):
resource_name = self._get_param("ResourceName")
tag_keys = self.unpack_list_params("TagKeys.TagKey")
self.redshift_backend.delete_tags(resource_name, tag_keys)
return self.get_response(
{
"DeleteTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def enable_snapshot_copy(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"destination_region": self._get_param("DestinationRegion"),
"retention_period": self._get_param("RetentionPeriod", 7),
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName"),
}
cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response(
{
"EnableSnapshotCopyResponse": {
"EnableSnapshotCopyResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def disable_snapshot_copy(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier")
}
cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response(
{
"DisableSnapshotCopyResponse": {
"DisableSnapshotCopyResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def modify_snapshot_copy_retention_period(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"retention_period": self._get_param("RetentionPeriod"),
}
cluster = self.redshift_backend.modify_snapshot_copy_retention_period(
**snapshot_copy_kwargs
)
return self.get_response(
{
"ModifySnapshotCopyRetentionPeriodResponse": {
"ModifySnapshotCopyRetentionPeriodResult": {
"Clusters": [cluster.to_json()]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def get_cluster_credentials(self):
cluster_identifier = self._get_param("ClusterIdentifier")
db_user = self._get_param("DbUser")
auto_create = self._get_bool_param("AutoCreate", False)
duration_seconds = self._get_int_param("DurationSeconds", 900)
cluster_credentials = self.redshift_backend.get_cluster_credentials(
cluster_identifier, db_user, auto_create, duration_seconds
)
return self.get_response(
{
"GetClusterCredentialsResponse": {
"GetClusterCredentialsResult": cluster_credentials,
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
|
william-richard/moto
|
moto/redshift/responses.py
|
Python
|
apache-2.0
| 28,522
|
# -*- coding: utf-8 -*-
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
from intel_quantization.quantize_graph.quantize_graph_base import QuantizeNodeBase
from intel_quantization.quantize_graph.quantize_graph_common import QuantizeGraphHelper as helper
class FuseNodeStartWithPad(QuantizeNodeBase):
def __init__(self, input_graph, output_node_names, perchannel,
start_node_name):
super(FuseNodeStartWithPad,
self).__init__(input_graph, output_node_names, perchannel,
start_node_name)
    def has_relu(self, node_name):
        # Return True when the graph node registered under node_name is a
        # Relu/Relu6 op; names not present in the mapping simply yield False.
        for _, value in self.node_name_mapping.items():
            if value.node.name == node_name:
                return value.node.op in ("Relu", "Relu6")
        return False
def _apply_pad_conv_fusion(self):
for _, value in self.node_name_mapping.items():
if value.node.op in ("Pad") and self.node_name_mapping[
value.
output[0]].node.op == "Conv2D" and self._find_relu_node(
value.node):
paddings_tensor = tensor_util.MakeNdarray(
self.node_name_mapping[value.node.input[1]].node.
attr["value"].tensor).flatten()
if any(paddings_tensor):
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(value.node)
self.add_output_graph_node(new_node)
else:
self.node_name_mapping[
value.output[0]].node.input[0] = value.node.input[0]
helper.set_attr_int_list(
self.node_name_mapping[value.output[0]].node,
"padding_list", paddings_tensor)
else:
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(value.node)
self.add_output_graph_node(new_node)
def get_longest_fuse(self):
return 2 # pad + conv
def apply_the_transform(self):
self._get_op_list()
self._apply_pad_conv_fusion()
self._reset_output_node_maps()
self.output_graph = self.remove_redundant_quantization(
self.output_graph)
# self.remove_dead_nodes(self.output_node_names)
return self.output_graph
|
mlperf/training_results_v0.7
|
Intel/benchmarks/minigo/8-nodes-32s-cpx-tensorflow/intel_quantization/quantize_graph/quantize_graph_pad.py
|
Python
|
apache-2.0
| 2,437
|
#! /usr/bin/env python
# Copyright 2011, 2013-2014 OpenStack Foundation
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import irc.client
import irc.connection
import logging
import random
import string
import ssl
import sys
import time
import yaml
logging.basicConfig(level=logging.INFO)
class CheckAccess(irc.client.SimpleIRCClient):
log = logging.getLogger("checkaccess")
def __init__(self, channels, nick, flags):
irc.client.SimpleIRCClient.__init__(self)
self.identify_msg_cap = False
self.channels = channels
self.nick = nick
self.flags = flags
self.current_channel = None
self.current_list = []
self.failed = None
def on_disconnect(self, connection, event):
if self.failed is not False:
sys.exit(1)
else:
sys.exit(0)
def on_welcome(self, c, e):
self.identify_msg_cap = False
self.log.debug("Requesting identify-msg capability")
c.cap('REQ', 'identify-msg')
c.cap('END')
def on_cap(self, c, e):
self.log.debug("Received cap response %s" % repr(e.arguments))
if e.arguments[0] == 'ACK' and 'identify-msg' in e.arguments[1]:
self.log.debug("identify-msg cap acked")
self.identify_msg_cap = True
self.advance()
def on_privnotice(self, c, e):
if not self.identify_msg_cap:
self.log.debug("Ignoring message because identify-msg "
"cap not enabled")
return
nick = e.source.split('!')[0]
auth = e.arguments[0][0]
msg = e.arguments[0][1:]
if auth != '+' or nick != 'ChanServ':
self.log.debug("Ignoring message from unauthenticated "
"user %s" % nick)
return
self.advance(msg)
def advance(self, msg=None):
if not self.current_channel:
if not self.channels:
self.connection.quit()
return
self.current_channel = self.channels.pop()
self.current_list = []
self.connection.privmsg('chanserv', 'access list %s' %
self.current_channel)
time.sleep(1)
return
if msg.endswith('is not registered.'):
self.failed = True
print("%s is not registered with ChanServ." %
self.current_channel)
self.current_channel = None
self.advance()
return
if msg.startswith('End of'):
found = False
for nick, flags, msg in self.current_list:
if nick == self.nick and flags == self.flags:
self.log.info('%s access ok on %s' %
(self.nick, self.current_channel))
found = True
break
if not found:
self.failed = True
print("%s does not have permissions on %s:" %
(self.nick, self.current_channel))
for nick, flags, msg in self.current_list:
print(msg)
                print()
# If this is the first channel checked, set the failure
# flag to false because we know that the system is
# operating well enough to check at least one channel.
if self.failed is None:
self.failed = False
self.current_channel = None
self.advance()
return
parts = msg.split()
self.current_list.append((parts[1], parts[2], msg))
def main():
parser = argparse.ArgumentParser(description='IRC channel access check')
parser.add_argument('-l', dest='config',
default='/etc/accessbot/channels.yaml',
help='path to the config file')
parser.add_argument('-s', dest='server',
default='chat.freenode.net',
help='IRC server')
parser.add_argument('-p', dest='port',
default=6697,
help='IRC port')
parser.add_argument('nick',
help='the nick for which access should be validated')
args = parser.parse_args()
    config = yaml.safe_load(open(args.config))
channels = []
for channel in config['channels']:
channels.append('#' + channel['name'])
access_level = None
for level, names in config['global'].items():
if args.nick in names:
access_level = level
if access_level is None:
raise Exception("Unable to determine global access level for %s" %
args.nick)
flags = config['access'][access_level]
a = CheckAccess(channels, args.nick, flags)
mynick = ''.join(random.choice(string.ascii_uppercase)
for x in range(16))
port = int(args.port)
if port == 6697:
factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
a.connect(args.server, int(args.port), mynick,
connect_factory=factory)
else:
a.connect(args.server, int(args.port), mynick)
a.start()
if __name__ == "__main__":
main()
|
Tesora/tesora-project-config
|
tools/check_irc_access.py
|
Python
|
apache-2.0
| 5,790
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import periodic_task
from cinder import quota
from cinder import utils
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from eventlet import greenpool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = ('available', 'in-use',)
VALID_CREATE_CG_SRC_SNAP_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMISCSIDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
default='none',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSISCSIDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver',
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSFCDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver', }
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
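# Usage sketch: delete_volume() further down in this module is wrapped exactly this way,
# so "delete VolA" and "create VolB from VolA" serialize on the "<volume_id>-delete_volume"
# lock instead of racing:
#
#     @locked_volume_operation
#     def delete_volume(self, context, volume_id, unmanage_only=False):
#         ...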
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.23'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self._tp = greenpool.GreenPool()
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty)
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s" %
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception as err:
LOG.error(_LE('Failed to fetch pool name for volume: %s'),
volume['id'])
LOG.exception(err)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_LE("Error encountered during "
"initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
# FIXME volume count for exporting is wrong
LOG.debug("Re-exporting %s volumes" % len(volumes))
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
LOG.error(_LE("Failed to re-export volume %s: "
"setting to error state"), volume['id'])
LOG.exception(export_ex)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
elif volume['status'] in ('downloading', 'creating'):
LOG.info(_LI("volume %(volume_id)s stuck in "
"%(volume_stat)s state. "
"Changing to error state."),
{'volume_id': volume['id'],
'volume_stat': volume['status']})
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
else:
LOG.info(_LI("volume %s: skipping export"), volume['id'])
snapshots = self.db.snapshot_get_by_host(ctxt,
self.host,
{'status': 'creating'})
for snapshot in snapshots:
LOG.info(_LI("snapshot %(snap_id)s stuck in "
"%(snap_stat)s state. "
"Changing to error state."),
{'snap_id': snapshot['id'],
'snap_stat': snapshot['status']})
self.db.snapshot_update(ctxt,
snapshot['id'],
{'status': 'error'})
except Exception as ex:
LOG.error(_LE("Error encountered during "
"re-exporting phase of driver initialization: "
" %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
self.driver.set_initialized()
LOG.debug('Resuming any in progress delete operations')
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_LI('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'])
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'])
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
# conditionally run replication status task
stats = self.driver.get_volume_stats(refresh=True)
if stats and stats.get('replication', False):
@periodic_task.periodic_task
def run_replication_task(self, ctxt):
self._update_replication_relationship_status(ctxt)
self.add_periodic_task(run_replication_task)
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None,
source_replicaid=None, consistencygroup_id=None,
cgsnapshot_id=None):
"""Creates the volume."""
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume_id,
allow_reschedule,
context,
request_spec,
filter_properties,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid,
source_replicaid=source_replicaid,
consistencygroup_id=consistencygroup_id,
cgsnapshot_id=cgsnapshot_id)
except Exception:
LOG.exception(_LE("Failed to create manager volume flow"))
raise exception.CinderException(
_("Failed to create manager volume flow."))
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
except Exception as e:
if hasattr(e, 'rescheduled'):
rescheduled = e.rescheduled
raise
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound as e:
# Flow was reverted, fetching volume_ref from the DB.
vol_ref = self.db.volume_get(context, volume_id)
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
return vol_ref['id']
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration source volume
If deleting the source volume in a migration, we want to skip
quotas. Also we want to skip other database updates for source
volume because these update will be handled at
migrate_volume_completion properly.
3. Delete a migration destination volume
If deleting the destination volume in a migration, we want to
skip quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume_ref = self.db.volume_get(context, volume_id)
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.info(_LI("Tried to delete volume %s, but it no longer exists, "
"moving on") % (volume_id))
return True
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
LOG.info(_LI("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if (vol_utils.extract_host(volume_ref['host']) != self.host):
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
is_migrating = volume_ref['migration_status'] is not None
is_migrating_dest = (is_migrating and
volume_ref['migration_status'].startswith(
'target:'))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("volume %s: removing export", volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug("volume %s: deleting", volume_ref['id'])
if unmanage_only:
self.driver.unmanage(volume_ref)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_LE("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'available')
return True
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume"))
# If deleting the source volume in a migration, we should skip database
# update here. In other cases, continue to update database entries.
if not is_migrating or is_migrating_dest:
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
LOG.info(_LI("volume %s: deleted successfully"), volume_ref['id'])
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_ref['host'], 'pool', True)
size = volume_ref['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
return True
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
self.db.volume_destroy(context, volume_ref['id'])
LOG.error(_LE("Unable to delete the destination volume %s "
"during volume migration, but the database "
"record needs to be deleted."),
volume_ref['id'])
else:
self.db.volume_update(context,
volume_ref['id'],
{'status': status})
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
LOG.info(_LI("snapshot %s: creating"), snapshot.id)
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("snapshot %(snap_id)s: creating",
{'snap_id': snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save(context)
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error'
snapshot.save(context)
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot.id})
snapshot.status = 'error'
snapshot.save(context)
raise exception.MetadataCopyFailure(reason=ex)
snapshot.status = 'available'
snapshot.progress = '100%'
snapshot.save(context)
LOG.info(_("snapshot %s: created successfully"), snapshot.id)
self._notify_about_snapshot_usage(context, snapshot, "create.end")
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot):
"""Deletes and unexports snapshot."""
context = context.elevated()
project_id = snapshot.project_id
LOG.info(_("snapshot %s: deleting"), snapshot.id)
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("snapshot %s: deleting", snapshot.id)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Cannot delete snapshot %s: snapshot is busy"),
snapshot.id)
snapshot.status = 'available'
snapshot.save()
return True
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error_deleting'
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy(context)
LOG.info(_LI("snapshot %s: deleted successfully"), snapshot.id)
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
msg = _("volume is already attached")
raise exception.InvalidVolume(reason=msg)
attachment = None
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachment = \
self.db.volume_attachment_get_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachment = \
self.db.volume_attachment_get_by_host(context, volume_id,
host_name_sanitized)
if attachment is not None:
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
if volume['migration_status']:
self.db.volume_update(context, volume_id,
{'migration_status': None})
self._notify_about_volume_usage(context, volume, "attach.end")
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.error(_LE("We couldn't find the volume attachment"
" for volume %(volume_id)s and"
" attachment id %(id)s"),
{"volume_id": volume_id,
"id": attachment_id})
raise
else:
            # We can try to degrade gracefully here by detaching
            # a volume without the attachment_id if the volume only has
            # one attachment.  This is for backwards compatibility.
attachments = self.db.volume_attachment_get_used_by_volume_id(
context, volume_id)
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Volume %(id)s is attached to more than one instance"
". A valid attachment_id must be passed to detach"
" this volume") % {'id': volume_id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
msg = _("Volume %(id)s doesn't have any attachments "
"to detach") % {'id': volume_id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
LOG.debug("volume %s: removing export", volume_id)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to uninitialized driver."),
{"volume": volume_id})
except Exception as ex:
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to remove export failure."),
{"volume": volume_id})
raise exception.RemoveExportException(volume=volume_id, reason=ex)
self._notify_about_volume_usage(context, volume, "detach.end")
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
image_meta)
LOG.debug("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully",
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
LOG.error(_LE("Error occurred while uploading "
"volume %(volume_id)s "
"to image %(image_id)s."),
{'volume_id': volume_id, 'image_id': image_meta['id']})
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
if not volume['volume_attachment']:
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warn(_LW("Deleting image %(image_id)s in %(image_status)s "
"state."),
{'image_id': image_id,
'image_status': image_status})
image_service.delete(context, image_id)
except Exception:
LOG.warn(_LW("Error occurred while deleting image %s."),
image_id, exc_info=True)
def _driver_data_namespace(self):
return self.driver.configuration.safe_get('driver_data_namespace') \
or self.driver.configuration.safe_get('volume_backend_name') \
or self.driver.__class__.__name__
def _get_driver_initiator_data(self, context, connector):
data = None
initiator = connector.get('initiator', False)
if initiator:
namespace = self._driver_data_namespace()
try:
data = self.db.driver_initiator_data_get(
context,
initiator,
namespace
)
except exception.CinderException:
LOG.exception(_LE("Failed to get driver initiator data for"
" initiator %(initiator)s and namespace"
" %(namespace)s"),
{'initiator': initiator,
'namespace': namespace})
raise
return data
def _save_driver_initiator_data(self, context, connector, model_update):
if connector.get('initiator', False) and model_update:
namespace = self._driver_data_namespace()
try:
self.db.driver_initiator_data_update(context,
connector['initiator'],
namespace,
model_update)
except exception.CinderException:
LOG.exception(_LE("Failed to update initiator data for"
" initiator %(initiator)s and backend"
" %(backend)s"),
{'initiator': connector['initiator'],
'backend': namespace})
raise
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=err)
except Exception as err:
err_msg = (_('Unable to validate connector information in '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
LOG.debug("Volume %s: creating export", volume_id)
model_update = self.driver.create_export(context.elevated(),
volume)
except exception.CinderException:
err_msg = (_('Unable to create export for volume %(volume_id)s') %
{'volume_id': volume_id})
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
" with driver provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise exception.ExportFailure(reason=ex)
initiator_data = self._get_driver_initiator_data(context, connector)
try:
if initiator_data:
conn_info = self.driver.initialize_connection(volume,
connector,
initiator_data)
else:
conn_info = self.driver.initialize_connection(volume,
connector)
except Exception as err:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
initiator_update = conn_info.get('initiator_update', None)
if initiator_update:
self._save_driver_initiator_data(context, connector,
initiator_update)
del conn_info['initiator_update']
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Unable to terminate volume connection: %(err)s')
% {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed updating model of "
"volume %(volume_id)s "
"with drivers update %(model)s "
"during xfr.") %
{'volume_id': volume_id,
'model': model_update})
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
return model_update
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
new_vol_values = {}
for k, v in volume.iteritems():
new_vol_values[k] = v
del new_vol_values['id']
del new_vol_values['_name_id']
# We don't copy volume_type because the db sets that according to
# volume_type_id, which we do copy
del new_vol_values['volume_type']
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_vol_values['host'] = host['host']
new_vol_values['status'] = 'creating'
        # FIXME(jdg): using a : delimiter is confusing to
# me below here. We're adding a string member to a dict
# using a :, which is kind of a poor choice in this case
# I think
new_vol_values['migration_status'] = 'target:%s' % volume['id']
new_vol_values['attach_status'] = 'detached'
new_vol_values['volume_attachment'] = []
new_volume = self.db.volume_create(ctxt, new_vol_values)
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
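        # Poll the database until the destination volume becomes available,
        # sleeping tries**2 seconds between checks and giving up (cleaning up
        # the half-created DB record) once the deadline above is exceeded.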
new_volume = self.db.volume_get(ctxt, new_volume['id'])
tries = 0
while new_volume['status'] != 'available':
tries += 1
now = time.time()
if new_volume['status'] == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
new_volume = self.db.volume_get(ctxt, new_volume['id'])
# Copy the source volume to the destination volume
try:
attachments = volume['volume_attachment']
if not attachments:
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume['id'],
new_volume['id'],
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume['id'],
new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg, {'vol1': volume['id'],
'vol2': new_volume['id']})
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'])
def _get_original_status(self, volume):
attachments = volume['volume_attachment']
if not attachments:
return 'available'
else:
return 'in-use'
def _clean_temporary_volume(self, ctxt, volume_id, new_volume_id,
clean_db_only=False):
volume = self.db.volume_get(ctxt, volume_id)
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume['migration_status'] == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
self.db.volume_destroy(ctxt, new_volume_id)
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi.delete_volume(ctxt, volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume_id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
updates = {'migration_status': None}
self.db.volume_update(ctxt, new_volume_id, updates)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume_id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume_id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
msg = _("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s")
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = self._get_original_status(volume)
if error:
msg = _("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s")
LOG.info(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None, 'status': orig_volume_status}
self.db.volume_update(ctxt, volume_id, updates)
return volume_id
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'completing'})
# Delete the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
self.detach_volume(ctxt, volume_id, attachment['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
LOG.error(msg % {'vol': volume_id, 'err': ex})
# Give driver (new_volume) a chance to update things as needed
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume
rpcapi.update_migrated_volume(ctxt,
volume,
new_volume)
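        # finish_volume_migration() copies the destination volume's fields
        # onto the original volume record, so the original volume id now
        # refers to the migrated data; the temporary record is then destroyed.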
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_destroy(ctxt, new_volume_id)
if orig_volume_status == 'in-use':
updates = {'migration_status': 'completing',
'status': orig_volume_status}
else:
updates = {'migration_status': None}
self.db.volume_update(ctxt, volume_id, updates)
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
return volume['id']
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
status_update = None
if volume_ref['status'] == 'retyping':
status_update = {'status': self._get_original_status(volume_ref)}
self.db.volume_update(ctxt, volume_ref['id'],
{'migration_status': 'migrating'})
if not force_host_copy and new_type_id is None:
try:
LOG.debug("volume %s: calling driver migrate_volume",
volume_ref['id'])
moved, model_update = self.driver.migrate_volume(ctxt,
volume_ref,
host)
if moved:
updates = {'host': host['host'],
'migration_status': None}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume_ref = self.db.volume_update(ctxt,
volume_ref['id'],
updates)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
try:
model_update = self.driver.create_export(ctxt,
volume_ref)
if model_update:
updates.update(model_update)
except Exception:
LOG.exception(_LE("Failed to create export for "
"volume: %s"), volume_ref['id'])
finally:
self.db.volume_update(ctxt, volume_ref['id'], updates)
if not moved:
try:
self._migrate_volume_generic(ctxt, volume_ref, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
try:
model_update = self.driver.create_export(ctxt,
volume_ref)
if model_update:
updates.update(model_update)
except Exception:
LOG.exception(_LE("Failed to create export for "
"volume: %s"), volume_ref['id'])
finally:
self.db.volume_update(ctxt, volume_ref['id'], updates)
@periodic_task.periodic_task
def _report_driver_status(self, context):
LOG.info(_LI("Updating volume status"))
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW('Unable to update stats, %(driver_name)s '
'-%(driver_version)s '
'%(config_group)s driver is uninitialized.') %
{'driver_name': self.driver.__class__.__name__,
'driver_version': self.driver.get_version(),
'config_group': config_group})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def notification(self, context, event):
LOG.info(_LI("Notification {%s} received"), event)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group['id'])
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot['id'])
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_extending'})
volume = self.db.volume_get(context, volume_id)
size_increase = (int(new_size)) - volume['size']
self._notify_about_volume_usage(context, volume, "resize.start")
try:
LOG.info(_LI("volume %s: extending"), volume['id'])
self.driver.extend_volume(volume, new_size)
LOG.info(_LI("volume %s: extended successfully"), volume['id'])
except Exception:
LOG.exception(_LE("volume %s: Error trying to extend volume"),
volume_id)
try:
self.db.volume_update(context, volume['id'],
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume_id)
finally:
QUOTAS.rollback(context, reservations)
return
QUOTAS.commit(context, reservations)
volume = self.db.volume_update(context,
volume['id'],
{'size': int(new_size),
'status': 'available'})
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True)
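        # Account the extra size against the pool's allocated_capacity_gb
        # stat, creating the pool entry on first use.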
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None):
def _retype_error(context, volume_id, old_reservations,
new_reservations, status_update):
try:
self.db.volume_update(context, volume_id, status_update)
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
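        # Retype first gives the driver a chance to convert the volume in
        # place; only if that fails (and migration_policy allows it) does it
        # fall back to a full migration to a backend of the new type.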
context = ctxt.elevated()
volume_ref = self.db.volume_get(ctxt, volume_id)
status_update = {'status': self._get_original_status(volume_ref)}
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
self.db.volume_update(context, volume_id, status_update)
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
self.db.volume_update(context, volume_id, status_update)
LOG.exception(_LE("Failed to update usages "
"while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume_ref.get('volume_type_id'), new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
if not retyped:
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume_ref,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
retyped = False
LOG.error(_LE("Volume %s: driver error when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = self.db.snapshot_get_all_for_volume(context,
volume_ref['id'])
if snaps:
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume_ref['replication_status']
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.db.volume_update(context, volume_ref['id'],
{'migration_status': 'starting'})
try:
self.migrate_volume(context, volume_id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self.db.volume_update(context, volume_id, model_update)
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self.publish_service_capabilities(context)
def manage_existing(self, ctxt, volume_id, ref=None):
LOG.debug('manage_existing: managing %s.' % ref)
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
LOG.exception(_LE("Failed to create manage_existing flow."))
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to promote replica "
"for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
LOG.debug("Volume %s: promote replica.", volume_id)
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to sync replica for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
LOG.debug("Volume %s: sync replica.", volume_id)
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error synchronizing secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
LOG.info(_LI('Updating volume replication status.'))
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Error checking replication status for "
"volume %s") % vol['id'])
def create_consistencygroup(self, context, group_id):
"""Creates the consistency group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
group_ref['host'] = self.host
status = 'available'
model_update = False
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group_ref['name'])
model_update = self.driver.create_consistencygroup(context,
group_ref)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_ref['id'], model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error'})
LOG.error(_LE("Consistency group %s: create failed"),
group_ref['name'])
now = timeutils.utcnow()
self.db.consistencygroup_update(context,
group_ref['id'],
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %s: created successfully"),
group_ref['name'])
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
return group_ref['id']
def create_consistencygroup_from_src(self, context, group_id,
cgsnapshot_id=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
"""
group_ref = self.db.consistencygroup_get(context, group_id)
try:
volumes = self.db.volume_get_all_by_group(
context, group_id)
cgsnapshot = None
snapshots = None
if cgsnapshot_id:
try:
cgsnapshot = self.db.cgsnapshot_get(context, cgsnapshot_id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Cannot create consistency group %(group)s "
"because cgsnapshot %(snap)s cannot be "
"found."),
{'group': group_id,
'snap': cgsnapshot_id})
raise
if cgsnapshot:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
for snap in snapshots:
if (snap['status'] not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group_id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %(group)s: creating from source "
"cgsnapshot %(snap)s."),
{'group': group_id,
'snap': cgsnapshot_id})
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group_ref, volumes, cgsnapshot,
sorted_snapshots))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_id, model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_id,
{'status': 'error'})
LOG.error(_LE("Consistency group %(group)s: create from "
"source cgsnapshot %(snap)s failed."),
{'group': group_id,
'snap': cgsnapshot_id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update,
group_id=group_id)
self._update_allocated_capacity(vol)
self.db.consistencygroup_update(context,
group_id,
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %(group)s: created successfully "
"from source cgsnapshot %(snap)s."),
{'group': group_id,
'snap': cgsnapshot_id})
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
return group_ref['id']
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = filter(
lambda snap: snap['id'] == vol['snapshot_id'], snapshots)
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _update_volume_from_src(self, context, vol, update, group_id=None):
try:
snapshot_ref = self.db.snapshot_get(context,
vol['snapshot_id'])
orig_vref = self.db.volume_get(context,
snapshot_ref['volume_id'])
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], vol['snapshot_id'])
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot_ref['volume_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.") %
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group_id):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
project_id = group_ref['project_id']
if context.project_id != group_ref['project_id']:
project_id = group_ref['project_id']
else:
project_id = context.project_id
LOG.info(_LI("Consistency group %s: deleting"), group_ref['id'])
volumes = self.db.volume_get_all_by_group(context, group_id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Consistency group %(group_id)s: deleting",
{'group_id': group_id})
model_update, volumes = self.driver.delete_consistencygroup(
context, group_ref)
if volumes:
for volume in volumes:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting consistency group '
'%s.') % group_ref['id'])
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.consistencygroup_update(context, group_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error_deleting'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Failed to update usages deleting "
"consistency groups."))
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."))
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
self.db.consistencygroup_destroy(context, group_id)
LOG.info(_LI("Consistency group %s: deleted successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.end", volumes)
self.publish_service_capabilities(context)
return True
def update_consistencygroup(self, context, group_id,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
LOG.info(_LI("Consistency group %s: updating"), group_id)
group = self.db.consistencygroup_get(context, group_id)
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume cannot be "
"found."),
{'volume_id': add_vol_ref['id'],
'group_id': group_id})
raise
if add_vol_ref['status'] not in ['in-use', 'available']:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group_id,
'status': add_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because volume "
"cannot be found."),
{'volume_id': remove_vol_ref['id'],
'group_id': group_id})
raise
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Consistency group %(group_id)s: updating",
{'group_id': group['id']})
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in ['error']:
msg = (_('Error occurred when updating consistency group '
'%s.') % group_id)
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
self.db.consistencygroup_update(context, group_id,
model_update)
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group_id})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group['id']})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
self.db.consistencygroup_update(context, group_id,
{'status': 'available',
'updated_at': now})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group_id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
LOG.info(_LI("Consistency group %s: updated successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group, "update.end")
return True
def create_cgsnapshot(self, context, group_id, cgsnapshot_id):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot_id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.create_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
# Update db if status is error
if snapshot['status'] == 'error':
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
# If status for one snapshot is error, make sure
# the status for the cgsnapshot is also error
if model_update['status'] != 'error':
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error'})
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot['id'], volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
snapshot['id'],
{'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',
'progress': '100%'})
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'available'})
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.end")
return cgsnapshot_id
def delete_cgsnapshot(self, context, cgsnapshot_id):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
project_id = cgsnapshot_ref['project_id']
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot_id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.delete_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
if snapshot['status'] in ['error_deleting', 'error'] and \
model_update['status'] not in \
['error_deleting', 'error']:
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.cgsnapshot_update(context, cgsnapshot_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error_deleting'})
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
LOG.info(_LI("cgsnapshot %s: deleted successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.end", snapshots)
return True
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Finalize migration process on backend device."""
model_update = None
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume)
if model_update:
self.db.volume_update(ctxt.elevated(),
volume['id'],
model_update)
|
tmenjo/cinder-2015.1.0
|
cinder/volume/manager.py
|
Python
|
apache-2.0
| 115,641
|
__author__ = 'frank'
import functools
import os
import os.path
import pprint
import re
import traceback
import urllib2
import zstacklib.utils.daemon as daemon
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
from zstacklib.utils import lock
from zstacklib.utils import linux
from zstacklib.utils import log
from zstacklib.utils import thread
from zstacklib.utils.bash import *
from zstacklib.utils.report import Report
from zstacklib.utils import shell
from zstacklib.utils.rollback import rollback, rollbackable
logger = log.get_logger(__name__)
class AgentResponse(object):
def __init__(self, success=True, error=None):
self.success = success
self.error = error if error else ''
self.totalCapacity = None
self.availableCapacity = None
class InitRsp(AgentResponse):
def __init__(self):
super(InitRsp, self).__init__()
self.fsid = None
class DownloadRsp(AgentResponse):
def __init__(self):
super(DownloadRsp, self).__init__()
self.size = None
self.actualSize = None
class UploadProgressRsp(AgentResponse):
def __init__(self):
super(UploadProgressRsp, self).__init__()
self.completed = False
self.progress = 0
self.size = 0
self.actualSize = 0
self.installPath = None
class GetImageSizeRsp(AgentResponse):
def __init__(self):
super(GetImageSizeRsp, self).__init__()
self.size = None
self.actualSize = None
class PingRsp(AgentResponse):
def __init__(self):
super(PingRsp, self).__init__()
self.failure = None
class GetFactsRsp(AgentResponse):
def __init__(self):
super(GetFactsRsp, self).__init__()
self.fsid = None
self.monAddr = None
class DeleteImageMetaDataResponse(AgentResponse):
def __init__(self):
        super(DeleteImageMetaDataResponse, self).__init__()
        self.ret = None
class WriteImageMetaDataResponse(AgentResponse):
    def __init__(self):
        super(WriteImageMetaDataResponse, self).__init__()
class GetImageMetaDataResponse(AgentResponse):
    def __init__(self):
        super(GetImageMetaDataResponse, self).__init__()
        self.imagesMetadata = None
class DumpImageMetaDataToFileResponse(AgentResponse):
    def __init__(self):
        super(DumpImageMetaDataToFileResponse, self).__init__()
class CheckImageMetaDataFileExistResponse(AgentResponse):
def __init__(self):
super(CheckImageMetaDataFileExistResponse, self).__init__()
self.backupStorageMetaFileName = None
self.exist = None
class GetLocalFileSizeRsp(AgentResponse):
def __init__(self):
super(GetLocalFileSizeRsp, self).__init__()
self.size = None
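# Decorator used by the agent handlers below: any uncaught exception is
# logged with its traceback and converted into a failed AgentResponse that
# is returned to the management node as JSON.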
def replyerror(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
rsp = AgentResponse()
rsp.success = False
rsp.error = str(e)
logger.warn(err)
return jsonobject.dumps(rsp)
return wrap
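# Book-keeping for one streamed image upload: progress is tracked by the
# number of bytes written toward the expected size, and the task is marked
# completed on success or failure so it can later be expunged by UploadTasks.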
class UploadTask(object):
def __init__(self, imageUuid, installPath, dstPath, tmpPath):
self.completed = False
self.imageUuid = imageUuid
self.installPath = installPath
self.dstPath = dstPath # without 'ceph://'
self.tmpPath = tmpPath # where image firstly imported to
self.expectedSize = 0
self.downloadedSize = 0
self.progress = 0
self.lastError = None
self.lastOpTime = linux.get_current_timestamp()
def fail(self, reason):
self.completed = True
self.lastError = reason
self.lastOpTime = linux.get_current_timestamp()
logger.info('task failed for %s: %s' % (self.imageUuid, reason))
def success(self):
self.completed = True
self.progress = 100
self.lastOpTime = linux.get_current_timestamp()
def is_started(self):
return self.progress > 0
def is_running(self):
return not(self.completed or self.is_started())
class UploadTasks(object):
MAX_RECORDS = 80
def __init__(self):
self.tasks = {}
def _expunge_oldest_task(self):
key, ts = '', linux.get_current_timestamp()
for k in self.tasks:
task = self.tasks[k]
if task.is_running():
continue
if task.lastOpTime < ts:
key, ts = k, task.lastOpTime
if key != '': del(self.tasks[key])
@lock.lock('ceph-upload-task')
def add_task(self, t):
if len(self.tasks) > self.MAX_RECORDS:
self._expunge_oldest_task()
self.tasks[t.imageUuid] = t
@lock.lock('ceph-upload-task')
def get_task(self, imageUuid):
return self.tasks.get(imageUuid)
# ------------------------------------------------------------------ #
class ProgressedFileWriter(object):
def __init__(self, wfd, pfunc):
self.wfd = wfd
self.pfunc = pfunc
self.bytesWritten = 0
def write(self, s):
self.wfd.write(s)
self.bytesWritten += len(s)
self.pfunc(self.bytesWritten)
def seek(self, offset, whence=None):
pass
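# cherrypy is used only for parsing the raw multipart upload body below;
# CustomPart extends cherrypy's Part (with maxrambytes = 0) so the file
# payload is streamed through ProgressedFileWriter instead of being buffered.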
import cherrypy
class CustomPart(cherrypy._cpreqbody.Part):
"""A customized multipart"""
maxrambytes = 0
def __init__(self, fp, headers, boundary, fifopath, pfunc):
cherrypy._cpreqbody.Part.__init__(self, fp, headers, boundary)
self.wfd = None
self.file = None
self.value = None
self.fifopath = fifopath
self.pfunc = pfunc
def make_file(self):
self.wfd = open(self.fifopath, 'w')
return ProgressedFileWriter(self.wfd, self.pfunc)
def get_boundary(entity):
ib = ""
if 'boundary' in entity.content_type.params:
# http://tools.ietf.org/html/rfc2046#section-5.1.1
# "The grammar for parameters on the Content-type field is such that it
# is often necessary to enclose the boundary parameter values in quotes
# on the Content-type line"
ib = entity.content_type.params['boundary'].strip('"')
if not re.match("^[ -~]{0,200}[!-~]$", ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
ib = ('--' + ib).encode('ascii')
# Find the first marker
while True:
b = entity.readline()
if not b:
return
b = b.strip()
if b == ib:
break
return ib
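# Streams a multipart upload straight into Ceph: the part body is written to
# a named pipe while a background thread runs `rbd import` reading from that
# pipe; afterwards a qcow2 payload is converted to a raw RBD image with
# qemu-img, while a raw payload is simply renamed into place.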
def stream_body(task, fpath, entity, boundary):
def _progress_consumer(total):
task.downloadedSize = total
@thread.AsyncThread
def _do_import(task, fpath):
shell.check_run("cat %s | rbd import --image-format 2 - %s" % (fpath, task.tmpPath))
while True:
headers = cherrypy._cpreqbody.Part.read_headers(entity.fp)
p = CustomPart(entity.fp, headers, boundary, fpath, _progress_consumer)
if not p.filename:
continue
# start consumer
_do_import(task, fpath)
try:
p.process()
except Exception as e:
logger.warn('process image %s failed: %s' % (task.imageUuid, str(e)))
pass
finally:
if p.wfd is not None:
p.wfd.close()
break
if task.downloadedSize != task.expectedSize:
task.fail('incomplete upload, got %d, expect %d' % (task.downloadedSize, task.expectedSize))
shell.run('rbd rm %s' % task.tmpPath)
return
file_format = None
try:
file_format = linux.get_img_fmt('rbd:'+task.tmpPath)
except Exception as e:
task.fail('upload image %s failed: %s' % (task.imageUuid, str(e)))
return
if file_format == 'qcow2':
if linux.qcow2_get_backing_file('rbd:'+task.tmpPath):
task.fail('Qcow2 image %s has backing file' % task.imageUuid)
shell.run('rbd rm %s' % task.tmpPath)
return
conf_path = None
try:
with open('/etc/ceph/ceph.conf', 'r') as fd:
conf = fd.read()
conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
conf_path = linux.write_to_temp_file(conf)
shell.check_run('qemu-img convert -f qcow2 -O rbd rbd:%s rbd:%s:conf=%s' % (task.tmpPath, task.dstPath, conf_path))
shell.check_run('rbd rm %s' % task.tmpPath)
finally:
if conf_path:
os.remove(conf_path)
else:
shell.check_run('rbd mv %s %s' % (task.tmpPath, task.dstPath))
task.success()
# ------------------------------------------------------------------ #
class CephAgent(object):
INIT_PATH = "/ceph/backupstorage/init"
DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
UPLOAD_IMAGE_PATH = "/ceph/backupstorage/image/upload"
UPLOAD_PROGRESS_PATH = "/ceph/backupstorage/image/progress"
DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
PING_PATH = "/ceph/backupstorage/ping"
ECHO_PATH = "/ceph/backupstorage/echo"
GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
GET_FACTS = "/ceph/backupstorage/facts"
GET_IMAGES_METADATA = "/ceph/backupstorage/getimagesmetadata"
DELETE_IMAGES_METADATA = "/ceph/backupstorage/deleteimagesmetadata"
DUMP_IMAGE_METADATA_TO_FILE = "/ceph/backupstorage/dumpimagemetadatatofile"
CHECK_IMAGE_METADATA_FILE_EXIST = "/ceph/backupstorage/checkimagemetadatafileexist"
CHECK_POOL_PATH = "/ceph/backupstorage/checkpool"
GET_LOCAL_FILE_SIZE = "/ceph/backupstorage/getlocalfilesize/"
MIGRATE_IMAGE_PATH = "/ceph/backupstorage/image/migrate"
CEPH_METADATA_FILE = "bs_ceph_info.json"
UPLOAD_PROTO = "upload://"
LENGTH_OF_UUID = 32
http_server = http.HttpServer(port=7761)
http_server.logfile_path = log.get_logfile_path()
upload_tasks = UploadTasks()
def __init__(self):
self.http_server.register_async_uri(self.INIT_PATH, self.init)
self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
self.http_server.register_raw_uri(self.UPLOAD_IMAGE_PATH, self.upload)
self.http_server.register_async_uri(self.UPLOAD_PROGRESS_PATH, self.get_upload_progress)
self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
self.http_server.register_async_uri(self.PING_PATH, self.ping)
self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
self.http_server.register_async_uri(self.GET_IMAGES_METADATA, self.get_images_metadata)
self.http_server.register_async_uri(self.CHECK_IMAGE_METADATA_FILE_EXIST, self.check_image_metadata_file_exist)
self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE, self.dump_image_metadata_to_file)
self.http_server.register_async_uri(self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)
self.http_server.register_async_uri(self.CHECK_POOL_PATH, self.check_pool)
self.http_server.register_async_uri(self.GET_LOCAL_FILE_SIZE, self.get_local_file_size)
self.http_server.register_async_uri(self.MIGRATE_IMAGE_PATH, self.migrate_image)
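    # Capacity is read from `ceph df -f json`; newer Ceph releases report
    # total_bytes/total_avail_bytes while older ones report total_space/
    # total_avail in KB, so both field layouts are handled below.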
def _get_capacity(self):
o = shell.call('ceph df -f json')
df = jsonobject.loads(o)
        if df.stats.total_bytes__ is not None:
total = long(df.stats.total_bytes_)
elif df.stats.total_space__ is not None:
total = long(df.stats.total_space__) * 1024
else:
raise Exception('unknown ceph df output: %s' % o)
if df.stats.total_avail_bytes__ is not None:
avail = long(df.stats.total_avail_bytes_)
elif df.stats.total_avail__ is not None:
avail = long(df.stats.total_avail_) * 1024
else:
raise Exception('unknown ceph df output: %s' % o)
return total, avail
def _set_capacity_to_response(self, rsp):
total, avail = self._get_capacity()
rsp.totalCapacity = total
rsp.availableCapacity = avail
@replyerror
def echo(self, req):
logger.debug('get echoed')
return ''
def _normalize_install_path(self, path):
return path.lstrip('ceph:').lstrip('//')
def _get_file_size(self, path):
o = shell.call('rbd --format json info %s' % path)
o = jsonobject.loads(o)
return long(o.size_)
@replyerror
def get_image_size(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetImageSizeRsp()
path = self._normalize_install_path(cmd.installPath)
rsp.size = self._get_file_size(path)
return jsonobject.dumps(rsp)
def _read_file_content(self, path):
with open(path) as f:
return f.read()
@in_bash
@replyerror
def get_images_metadata(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool_name = cmd.poolName
bs_uuid = pool_name.split("-")[-1]
valid_images_info = ""
self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
last_image_install_path = ""
bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
with open(bs_ceph_info_file) as fd:
images_info = fd.read()
for image_info in images_info.split('\n'):
if image_info != '':
image_json = jsonobject.loads(image_info)
# todo support multiple bs
image_uuid = image_json['uuid']
image_install_path = image_json["backupStorageRefs"][0]["installPath"]
ret = bash_r("rbd info %s" % image_install_path.split("//")[1])
if ret == 0 :
logger.info("Check image %s install path %s successfully!" % (image_uuid, image_install_path))
if image_install_path != last_image_install_path:
valid_images_info = image_info + '\n' + valid_images_info
last_image_install_path = image_install_path
else:
logger.warn("Image %s install path %s is invalid!" % (image_uuid, image_install_path))
self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
rsp = GetImageMetaDataResponse()
rsp.imagesMetadata= valid_images_info
return jsonobject.dumps(rsp)
@in_bash
@replyerror
def check_image_metadata_file_exist(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool_name = cmd.poolName
bs_uuid = pool_name.split("-")[-1]
rsp = CheckImageMetaDataFileExistResponse()
rsp.backupStorageMetaFileName = self.CEPH_METADATA_FILE
ret, output = bash_ro("rados -p bak-t-%s stat %s" % (bs_uuid,self.CEPH_METADATA_FILE))
if ret == 0:
rsp.exist = True
else:
rsp.exist = False
return jsonobject.dumps(rsp)
def get_metadata_file(self, bs_uuid, file_name):
local_file_name = "/tmp/%s" % file_name
bash_ro("rm -rf %s" % local_file_name)
bash_ro("rados -p bak-t-%s get %s %s" % (bs_uuid, file_name, local_file_name))
def put_metadata_file(self, bs_uuid, file_name):
local_file_name = "/tmp/%s" % file_name
ret, output = bash_ro("rados -p bak-t-%s put %s %s" % (bs_uuid, file_name, local_file_name))
if ret == 0:
bash_ro("rm -rf %s" % local_file_name)
@in_bash
@replyerror
def dump_image_metadata_to_file(self, req):
def _write_info_to_metadata_file(fd):
strip_list_content = content[1:-1]
data_list = strip_list_content.split('},')
for item in data_list:
if item.endswith("}") is not True:
item = item + "}"
fd.write(item + '\n')
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool_name = cmd.poolName
bs_uuid = pool_name.split("-")[-1]
content = cmd.imageMetaData
dump_all_metadata = cmd.dumpAllMetaData
if dump_all_metadata is True:
            # this means no metadata exists in ceph yet
bash_r("touch /tmp/%s" % self.CEPH_METADATA_FILE)
else:
self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
if content is not None:
if '[' == content[0] and ']' == content[-1]:
if dump_all_metadata is True:
with open(bs_ceph_info_file, 'w') as fd:
_write_info_to_metadata_file(fd)
else:
with open(bs_ceph_info_file, 'a') as fd:
_write_info_to_metadata_file(fd)
else:
# one image info
if dump_all_metadata is True:
with open(bs_ceph_info_file, 'w') as fd:
fd.write(content + '\n')
else:
with open(bs_ceph_info_file, 'a') as fd:
fd.write(content + '\n')
self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
rsp = DumpImageMetaDataToFileResponse()
return jsonobject.dumps(rsp)
@in_bash
@replyerror
def delete_image_metadata_from_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
image_uuid = cmd.imageUuid
pool_name = cmd.poolName
bs_uuid = pool_name.split("-")[-1]
self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
ret, output = bash_ro("sed -i.bak '/%s/d' %s" % (image_uuid, bs_ceph_info_file))
self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
rsp = DeleteImageMetaDataResponse()
rsp.ret = ret
return jsonobject.dumps(rsp)
@replyerror
@in_bash
def get_facts(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
o = bash_o('ceph mon_status')
mon_status = jsonobject.loads(o)
fsid = mon_status.monmap.fsid_
rsp = GetFactsRsp()
facts = bash_o('ceph -s -f json')
mon_facts = jsonobject.loads(facts)
for mon in mon_facts.monmap.mons:
ADDR = mon.addr.split(':')[0]
if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
rsp.monAddr = ADDR
break
if not rsp.monAddr:
raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)
rsp.fsid = fsid
return jsonobject.dumps(rsp)
@replyerror
def init(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
o = shell.call('ceph mon_status')
mon_status = jsonobject.loads(o)
fsid = mon_status.monmap.fsid_
existing_pools = shell.call('ceph osd lspools')
for pool in cmd.pools:
if pool.predefined and pool.name not in existing_pools:
raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
elif pool.name not in existing_pools:
shell.check_run('ceph osd pool create %s 128' % pool.name)
rsp = InitRsp()
rsp.fsid = fsid
self._set_capacity_to_response(rsp)
return jsonobject.dumps(rsp)
def _parse_install_path(self, path):
return path.lstrip('ceph:').lstrip('//').split('/')
def _fail_task(self, task, reason):
task.fail(reason)
raise Exception(reason)
def _get_fifopath(self, uu):
import tempfile
d = tempfile.gettempdir()
return os.path.join(d, uu)
# handler for multipart upload, requires:
# - header X-IMAGE-UUID
# - header X-IMAGE-SIZE
def upload(self, req):
imageUuid = req.headers['X-IMAGE-UUID']
imageSize = req.headers['X-IMAGE-SIZE']
task = self.upload_tasks.get_task(imageUuid)
if task is None:
raise Exception('image not found %s' % imageUuid)
task.expectedSize = long(imageSize)
total, avail = self._get_capacity()
if avail <= task.expectedSize:
self._fail_task(task, 'capacity not enough for size: ' + imageSize)
entity = req.body
boundary = get_boundary(entity)
if not boundary:
self._fail_task(task, 'unexpected post form')
try:
# prepare the fifo to save image upload
fpath = self._get_fifopath(imageUuid)
linux.rm_file_force(fpath)
os.mkfifo(fpath)
stream_body(task, fpath, entity, boundary)
except Exception as e:
self._fail_task(task, str(e))
finally:
linux.rm_file_force(fpath)
def _prepare_upload(self, cmd):
start = len(self.UPLOAD_PROTO)
imageUuid = cmd.url[start:start+self.LENGTH_OF_UUID]
dstPath = self._normalize_install_path(cmd.installPath)
pool, image_name = self._parse_install_path(cmd.installPath)
tmp_image_name = 'tmp-%s' % image_name
tmpPath = '%s/%s' % (pool, tmp_image_name)
task = UploadTask(imageUuid, cmd.installPath, dstPath, tmpPath)
self.upload_tasks.add_task(task)
def _get_upload_path(self, req):
host = req[http.REQUEST_HEADER]['Host']
return 'http://' + host + self.UPLOAD_IMAGE_PATH
@replyerror
def get_upload_progress(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
task = self.upload_tasks.get_task(cmd.imageUuid)
if task is None:
raise Exception('image not found %s' % cmd.imageUuid)
rsp = UploadProgressRsp()
rsp.completed = task.completed
rsp.installPath = task.installPath
rsp.size = task.expectedSize
rsp.actualSize = task.expectedSize
if task.expectedSize == 0:
rsp.progress = 0
elif task.completed:
rsp.progress = 100
else:
rsp.progress = task.downloadedSize * 90 / task.expectedSize
if task.lastError is not None:
rsp.success = False
rsp.error = task.lastError
return jsonobject.dumps(rsp)
@replyerror
@rollback
def download(self, req):
rsp = DownloadRsp()
def isDerivedQcow2Image(path):
return getOriginalFormat(path) == "derivedQcow2"
def getOriginalFormat(path):
if path.startswith('http://') or path.startswith('https://'):
resp = urllib2.urlopen(path)
qhdr = resp.read(0x9007)
resp.close()
else:
resp = open(path)
qhdr = resp.read(0x9007)
resp.close()
if len(qhdr) < 0x9007:
return "raw"
if qhdr[:4] == 'QFI\xfb':
if qhdr[16:20] == '\x00\x00\x00\00':
return "qcow2"
else:
return "derivedQcow2"
if qhdr[0x8001:0x8006] == 'CD001':
return 'iso'
if qhdr[0x8801:0x8806] == 'CD001':
return 'iso'
if qhdr[0x9001:0x9006] == 'CD001':
return 'iso'
return "raw"
def fail_if_has_backing_file(fpath):
if isDerivedQcow2Image(fpath):
            raise Exception('image has a backing file or %s does not exist!' % fpath)
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool, image_name = self._parse_install_path(cmd.installPath)
tmp_image_name = 'tmp-%s' % image_name
@rollbackable
def _1():
shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
def _getRealSize(length):
'''length looks like: 10245K'''
logger.debug(length)
if not length[-1].isalpha():
return length
units = {
"g": lambda x: x * 1024 * 1024 * 1024,
"m": lambda x: x * 1024 * 1024,
"k": lambda x: x * 1024,
}
try:
if not length[-1].isalpha():
return length
return units[length[-1].lower()](int(length[:-1]))
except:
logger.warn(linux.get_exception_stacktrace())
return length
# whether we have an upload request
if cmd.url.startswith(self.UPLOAD_PROTO):
self._prepare_upload(cmd)
rsp.size = 0
rsp.uploadPath = self._get_upload_path(req)
self._set_capacity_to_response(rsp)
return jsonobject.dumps(rsp)
if cmd.sendCommandUrl:
Report.url = cmd.sendCommandUrl
report = Report(cmd.threadContext, cmd.threadContextStack)
report.processType = "AddImage"
report.resourceUuid = cmd.imageUuid
report.progress_report("0", "start")
if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
fail_if_has_backing_file(cmd.url)
image_format = getOriginalFormat(cmd.url)
cmd.url = linux.shellquote(cmd.url)
            # roll back the tmp ceph image if a later step fails
_1()
PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
content_length = shell.call('curl -sI %s|grep Content-Length' % cmd.url).strip().split()[1]
total = _getRealSize(content_length)
def _getProgress(synced):
logger.debug("getProgress in ceph-bs-agent, synced: %s, total: %s" % (synced, total))
last = shell.call('tail -1 %s' % PFILE).strip()
if not last or len(last.split()) < 1:
return synced
logger.debug("last synced: %s" % last)
written = _getRealSize(last.split()[0])
if total > 0 and synced < written:
synced = written
if synced < total:
percent = int(round(float(synced) / float(total) * 90))
report.progress_report(percent, "report")
return synced
logger.debug("content-length is: %s" % total)
_, _, err = bash_progress_1('set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
% (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
if err:
raise err
actual_size = linux.get_file_size_by_http_head(cmd.url)
if os.path.exists(PFILE):
os.remove(PFILE)
elif cmd.url.startswith('file://'):
src_path = cmd.url.lstrip('file:')
src_path = os.path.normpath(src_path)
if not os.path.isfile(src_path):
raise Exception('cannot find the file[%s]' % src_path)
fail_if_has_backing_file(src_path)
            # roll back the tmp ceph image if a later step fails
_1()
image_format = getOriginalFormat(src_path)
shell.check_run("rbd import --image-format 2 %s %s/%s" % (src_path, pool, tmp_image_name))
actual_size = os.path.getsize(src_path)
else:
raise Exception('unknown url[%s]' % cmd.url)
file_format = shell.call(
"set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
file_format = file_format.strip()
if file_format not in ['qcow2', 'raw']:
raise Exception('unknown image format: %s' % file_format)
if file_format == 'qcow2':
conf_path = None
try:
with open('/etc/ceph/ceph.conf', 'r') as fd:
conf = fd.read()
conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
conf_path = linux.write_to_temp_file(conf)
shell.check_run('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path))
shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
finally:
if conf_path:
os.remove(conf_path)
else:
shell.check_run('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))
report.progress_report("100", "finish")
@rollbackable
def _2():
shell.check_run('rbd rm %s/%s' % (pool, image_name))
_2()
o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
image_stats = jsonobject.loads(o)
rsp.size = long(image_stats.size_)
rsp.actualSize = actual_size
if image_format == "qcow2":
rsp.format = "raw"
else:
rsp.format = image_format
self._set_capacity_to_response(rsp)
return jsonobject.dumps(rsp)
@replyerror
def ping(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = PingRsp()
facts = bash_o('ceph -s -f json')
mon_facts = jsonobject.loads(facts)
found = False
for mon in mon_facts.monmap.mons:
if cmd.monAddr in mon.addr:
found = True
break
if not found:
rsp.success = False
rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr has changed on the mon server[uuid:%s]; it is not %s anymore. ' \
                        'Reconnecting the ceph backup storage ' \
                        'may solve this issue' % (cmd.monUuid, cmd.monAddr)
return jsonobject.dumps(rsp)
create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
create_img(False)
if create_img.return_code != 0 and 'File exists' not in create_img.stderr and 'File exists' not in create_img.stdout:
rsp.success = False
rsp.failure = 'UnableToCreateFile'
rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
else:
shell.run('rbd rm %s' % cmd.testImagePath)
return jsonobject.dumps(rsp)
@replyerror
def delete(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
pool, image_name = self._parse_install_path(cmd.installPath)
def delete_image(_):
            # if the image is already deleted, we don't have to wait for the timeout
img = "%s/%s" % (pool, image_name)
shell.check_run('rbd info %s && rbd rm %s' % (img, img))
return True
# 'rbd rm' might fail due to client crash. We wait for 30 seconds as suggested by 'rbd'.
#
# rbd: error: image still has watchers
# This means the image is still open or the client using it crashed. Try again after
# closing/unmapping it or waiting 30s for the crashed client to timeout.
linux.wait_callback_success(delete_image, interval=5, timeout=30, ignore_exception_in_callback=True)
rsp = AgentResponse()
self._set_capacity_to_response(rsp)
return jsonobject.dumps(rsp)
@replyerror
def check_pool(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
existing_pools = shell.call('ceph osd lspools')
for pool in cmd.pools:
if pool.name not in existing_pools:
raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
return jsonobject.dumps(AgentResponse())
@replyerror
def get_local_file_size(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetLocalFileSizeRsp()
rsp.size = linux.get_local_file_size(cmd.path)
return jsonobject.dumps(rsp)
def _migrate_image(self, image_uuid, image_size, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
src_install_path = self._normalize_install_path(src_install_path)
dst_install_path = self._normalize_install_path(dst_install_path)
rst = shell.run('rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s\'' % (src_install_path, image_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid, dst_install_path))
if rst != 0:
return rst
src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid))
if src_md5 != dst_md5:
return -1
else:
return 0
@replyerror
@in_bash
def migrate_image(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
rst = self._migrate_image(cmd.imageUuid, cmd.imageSize, cmd.srcInstallPath, cmd.dstInstallPath, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
if rst != 0:
rsp.success = False
rsp.error = "Failed to migrate image from one ceph backup storage to another."
self._set_capacity_to_response(rsp)
return jsonobject.dumps(rsp)
class CephDaemon(daemon.Daemon):
def __init__(self, pidfile):
super(CephDaemon, self).__init__(pidfile)
def run(self):
self.agent = CephAgent()
self.agent.http_server.start()
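# --- Illustrative client sketch (not part of the original agent) -------------
# The upload() handler above expects a multipart POST carrying the
# X-IMAGE-UUID and X-IMAGE-SIZE headers, after a download() call with an
# "upload://" URL has registered the task and returned uploadPath.  The
# host/port default and the `requests` dependency below are assumptions.
def _example_upload_client(image_path, image_uuid, agent='http://127.0.0.1:7761'):
    import requests
    size = os.path.getsize(image_path)
    with open(image_path, 'rb') as f:
        # requests builds the multipart body; the agent reads it from the
        # multipart boundary and streams it into 'rbd import' through a fifo.
        return requests.post(agent + CephAgent.UPLOAD_IMAGE_PATH,
                             headers={'X-IMAGE-UUID': image_uuid,
                                      'X-IMAGE-SIZE': str(size)},
                             files={'file': f})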
| live4thee/zstack-utility | cephbackupstorage/cephbackupstorage/cephagent.py | Python | apache-2.0 | 33,834 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from oslo_log import log as logging
from designate import utils
from designate.api.v2.controllers import rest
from designate.objects import TsigKey
from designate.objects.adapters import DesignateAdapter
LOG = logging.getLogger(__name__)
class TsigKeysController(rest.RestController):
SORT_KEYS = ['created_at', 'id', 'updated_at', 'name']
@pecan.expose(template='json:', content_type='application/json')
@utils.validate_uuid('tsigkey_id')
def get_one(self, tsigkey_id):
"""Get TsigKey"""
request = pecan.request
context = request.environ['context']
return DesignateAdapter.render(
'API_v2',
self.central_api.get_tsigkey(context, tsigkey_id),
request=request)
@pecan.expose(template='json:', content_type='application/json')
def get_all(self, **params):
"""List all TsigKeys"""
request = pecan.request
context = request.environ['context']
# Extract the pagination params
marker, limit, sort_key, sort_dir = utils.get_paging_params(
params, self.SORT_KEYS)
# Extract any filter params
accepted_filters = ('name', 'algorithm', 'scope')
criterion = self._apply_filter_params(
params, accepted_filters, {})
return DesignateAdapter.render(
'API_v2',
self.central_api.find_tsigkeys(
context, criterion, marker, limit, sort_key, sort_dir),
request=request)
@pecan.expose(template='json:', content_type='application/json')
def post_all(self):
"""Create TsigKey"""
request = pecan.request
response = pecan.response
context = request.environ['context']
body = request.body_dict
tsigkey = DesignateAdapter.parse('API_v2', body, TsigKey())
tsigkey.validate()
# Create the tsigkey
tsigkey = self.central_api.create_tsigkey(
context, tsigkey)
tsigkey = DesignateAdapter.render('API_v2', tsigkey, request=request)
response.headers['Location'] = tsigkey['links']['self']
response.status_int = 201
# Prepare and return the response body
return tsigkey
@pecan.expose(template='json:', content_type='application/json')
@pecan.expose(template='json:', content_type='application/json-patch+json')
@utils.validate_uuid('tsigkey_id')
def patch_one(self, tsigkey_id):
"""Update TsigKey"""
request = pecan.request
context = request.environ['context']
body = request.body_dict
response = pecan.response
if request.content_type == 'application/json-patch+json':
            raise NotImplementedError('json-patch not implemented')
# Fetch the existing tsigkey entry
tsigkey = self.central_api.get_tsigkey(context, tsigkey_id)
tsigkey = DesignateAdapter.parse('API_v2', body, tsigkey)
# Validate the new set of data
tsigkey.validate()
# Update and persist the resource
tsigkey = self.central_api.update_tsigkey(context, tsigkey)
response.status_int = 200
return DesignateAdapter.render('API_v2', tsigkey, request=request)
@pecan.expose(template=None, content_type='application/json')
@utils.validate_uuid('tsigkey_id')
def delete_one(self, tsigkey_id):
"""Delete TsigKey"""
request = pecan.request
response = pecan.response
context = request.environ['context']
self.central_api.delete_tsigkey(context, tsigkey_id)
response.status_int = 204
# NOTE: This is a hack and a half.. But Pecan needs it.
return ''
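# --- Illustrative client sketch (not part of designate) ----------------------
# get_all() above accepts the paging params (limit, marker, sort_key, sort_dir)
# plus the name/algorithm/scope filters.  The endpoint URL, token and filter
# value below are assumptions; `requests` is only used to show the query
# string this controller understands.
def _example_list_tsigkeys(endpoint, token):
    import requests
    return requests.get(
        endpoint + '/v2/tsigkeys',
        headers={'X-Auth-Token': token},
        params={'limit': 20, 'sort_key': 'name', 'sort_dir': 'asc',
                'name': 'example-key'}).json()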
| grahamhayes/designate | designate/api/v2/controllers/tsigkeys.py | Python | apache-2.0 | 4,349 |
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from fixture.session import SessionHelper
from fixture.mk import MkHelper
from fixture.cas import CasHelper
class Application:
def __init__(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
self.session = SessionHelper(self)
self.mk = MkHelper(self)
self.cas = CasHelper(self)
def open_home_page(self):
wd = self.wd
wd.get("https://new.kyivstar.ua/ecare/")
wd.maximize_window()
def destroy(self):
self.wd.quit()
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
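# --- Usage sketch (illustrative; the conftest-style reuse is an assumption) --
# is_valid() exists so a test fixture can keep one browser session alive and
# only rebuild the Application when the previous session has died.
def _example_get_app(existing_app=None):
    if existing_app is None or not existing_app.is_valid():
        existing_app = Application()
        existing_app.open_home_page()
    return existing_app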
| AlexBenyuh/python_training | fixture/application.py | Python | apache-2.0 | 897 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Given a rectangular matrix of characters, add a border of asterisks(*) to it.
For
picture = ["abc",
           "ded"]
the output should be
addBorder(picture) = ["*****",
                      "*abc*",
                      "*ded*",
                      "*****"]
'''
def addBorder(picture):
picture = [''.join(('*',i,'*')) for i in picture]
for item in picture:
border = '*'*len(item)
indx = [0,(len(picture)+1)]
for item in indx:
picture.insert(item,border)
return picture
if __name__ == '__main__':
picture = ["a"]
    print(addBorder(picture))
| mayababuji/MyCodefights | addBorder_BACKUP_4896.py | Python | apache-2.0 | 872 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from niftynet.layer import layer_util
class DilatedTensor(object):
"""
    This context manager wraps input_tensor.
    On entering the context the tensor is dilated (via space-to-batch);
    it is restored to the original spatial arrangement when the context exits.
"""
def __init__(self, input_tensor, dilation_factor):
assert (layer_util.check_spatial_dims(
input_tensor, lambda x: x % dilation_factor == 0))
self._tensor = input_tensor
self.dilation_factor = dilation_factor
# parameters to transform input tensor
self.spatial_rank = layer_util.infer_spatial_rank(self._tensor)
self.zero_paddings = [[0, 0]] * self.spatial_rank
self.block_shape = [dilation_factor] * self.spatial_rank
def __enter__(self):
if self.dilation_factor > 1:
self._tensor = tf.space_to_batch_nd(self._tensor,
self.block_shape,
self.zero_paddings,
name='dilated')
return self
def __exit__(self, *args):
if self.dilation_factor > 1:
self._tensor = tf.batch_to_space_nd(self._tensor,
self.block_shape,
self.zero_paddings,
name='de-dilate')
@property
def tensor(self):
return self._tensor
@tensor.setter
def tensor(self, value):
self._tensor = value
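# --- Usage sketch (illustrative, not part of the original module) ------------
# Typical pattern: dilate, apply an ordinary convolution inside the context,
# and assign the result back to `.tensor` so it is de-dilated on exit.  A 3-D
# spatial input whose sizes are divisible by the dilation factor is assumed,
# and tf.layers.conv3d is only an example op.
def _example_dilated_conv(input_tensor, dilation_factor=2):
    with DilatedTensor(input_tensor, dilation_factor) as dilated:
        dilated.tensor = tf.layers.conv3d(
            dilated.tensor, filters=8, kernel_size=3, padding='same')
    return dilated.tensor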
| NifTK/NiftyNet | niftynet/layer/dilatedcontext.py | Python | apache-2.0 | 1,675 |
# These color schemes come from d3: http://d3js.org/
#
# They are licensed under the following license:
#
# Copyright (c) 2010-2015, Michael Bostock
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The name Michael Bostock may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#: 10 colors that work well together as data category colors
CATEGORY10 = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
#: 20 colors that work well together as data category colors
CATEGORY20 = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a',
'#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94',
'#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d',
'#17becf', '#9edae5']
#: 20 colors that work well together as data category colors
CATEGORY20b = ['#393b79', '#5254a3', '#6b6ecf', '#9c9ede', '#637939',
'#8ca252', '#b5cf6b', '#cedb9c', '#8c6d31', '#bd9e39',
'#e7ba52', '#e7cb94', '#843c39', '#ad494a', '#d6616b',
'#e7969c', '#7b4173', '#a55194', '#ce6dbd', '#de9ed6']
#: 20 colors that work well together as data category colors
CATEGORY20c = ['#3182bd', '#6baed6', '#9ecae1', '#c6dbef', '#e6550d',
'#fd8d3c', '#fdae6b', '#fdd0a2', '#31a354', '#74c476',
'#a1d99b', '#c7e9c0', '#756bb1', '#9e9ac8', '#bcbddc',
'#dadaeb', '#636363', '#969696', '#bdbdbd', '#d9d9d9']
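# --- Illustrative sketch (not part of the original module) -------------------
# A common way to use these palettes: hand out one colour per category,
# wrapping around when there are more categories than colours.
def _example_color_cycle(n_categories, palette=CATEGORY10):
    return [palette[i % len(palette)] for i in range(n_categories)]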
| rmenegaux/bqplot | bqplot/colorschemes.py | Python | apache-2.0 | 2,813 |
# Copyright 2012 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| leopoul/mupy | muparse/__init__.py | Python | apache-2.0 | 582 |
import shutil
import numpy as np
import dill
import gzip
import os
import subprocess
import struct
from array import array
import warnings
from dps import cfg
from dps.utils import image_to_string, cd, resize_image
# This link seems not to work anymore...
# emnist_url = 'https://cloudstor.aarnet.edu.au/plus/index.php/s/54h3OuGJhFLwAlQ/download'
emnist_url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
template = 'emnist-byclass-{}-{}-idx{}-ubyte.gz'
emnist_gz_names = [
template.format('test', 'images', 3),
template.format('test', 'labels', 1),
template.format('train', 'images', 3),
template.format('train', 'labels', 1)
]
def emnist_classes():
return (
[str(i) for i in range(10)]
+ [chr(i + ord('A')) for i in range(26)]
+ [chr(i + ord('a')) for i in range(26)]
)
emnist_filenames = [c + ".pklz" for c in emnist_classes()]
def _validate_emnist(path):
if not os.path.isdir(path):
return False
return set(os.listdir(path)) == set(emnist_filenames)
def _download_emnist(data_dir):
"""
Download the emnist data. Result is that a directory called "emnist_raw"
is created inside `data_dir` which contains 4 files.
Parameters
----------
    data_dir: str
Path to directory where files should be stored.
"""
emnist_raw_dir = os.path.join(data_dir, "emnist_raw")
os.makedirs(emnist_raw_dir, exist_ok=True)
with cd(emnist_raw_dir):
if not os.path.exists('gzip.zip'):
print("Downloading...")
command = "wget --output-document=gzip.zip {}".format(emnist_url).split()
subprocess.run(command, check=True)
else:
print("Found existing copy of gzip.zip, not downloading.")
print("Extracting...")
for fname in emnist_gz_names:
if not os.path.exists(fname):
subprocess.run('unzip gzip.zip gzip/{}'.format(fname), shell=True, check=True)
shutil.move('gzip/{}'.format(fname), '.')
else:
print("{} already exists, skipping extraction.".format(fname))
try:
shutil.rmtree('gzip')
except FileNotFoundError:
pass
return emnist_raw_dir
def _emnist_load_helper(path_img, path_lbl):
with gzip.open(path_lbl, 'rb') as file:
magic, size = struct.unpack(">II", file.read(8))
if magic != 2049:
raise ValueError('Magic number mismatch, expected 2049,'
'got {}'.format(magic))
labels = array("B", file.read())
with gzip.open(path_img, 'rb') as file:
magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
if magic != 2051:
raise ValueError('Magic number mismatch, expected 2051,'
'got {}'.format(magic))
image_data = array("B", file.read())
images = np.zeros((size, rows * cols), dtype=np.uint8)
for i in range(size):
images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]
return np.array(images, dtype=np.uint8), np.array(labels, dtype=np.uint8)
def maybe_convert_emnist_shape(path, shape):
""" Create a version of emnist on disk that is reshaped to the desired shape.
Images are stored on disk as uint8.
"""
if shape == (28, 28):
return
shape_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*shape))
if os.path.isdir(shape_dir):
return
emnist_dir = os.path.join(path, 'emnist')
print("Converting (28, 28) EMNIST dataset to {}...".format(shape))
try:
shutil.rmtree(shape_dir)
except FileNotFoundError:
pass
os.makedirs(shape_dir, exist_ok=False)
classes = ''.join(
[str(i) for i in range(10)]
+ [chr(i + ord('A')) for i in range(26)]
+ [chr(i + ord('a')) for i in range(26)]
)
for i, cls in enumerate(sorted(classes)):
with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
_x = dill.load(f)
new_x = []
for img in _x[:10]:
img = resize_image(img, shape, preserve_range=True)
new_x.append(img)
print(cls)
print(image_to_string(_x[0]))
_x = np.array(new_x, dtype=_x.dtype)
print(image_to_string(_x[0]))
path_i = os.path.join(shape_dir, cls + '.pklz')
with gzip.open(path_i, 'wb') as f:
dill.dump(_x, f, protocol=dill.HIGHEST_PROTOCOL)
def maybe_download_emnist(data_dir, quiet=0, shape=None):
"""
Download emnist data if it hasn't already been downloaded. Do some
    post-processing to put it in a more useful format. The end result is a directory
    called `emnist` which contains a separate pklz file for each emnist
    class.
Pixel values of stored images are uint8 values up to 255.
Images for each class are put into a numpy array with shape (n_images_in_class, 28, 28).
This numpy array is pickled and stored in a zip file with name <class char>.pklz.
Parameters
----------
data_dir: str
Directory where files should be stored.
"""
emnist_dir = os.path.join(data_dir, 'emnist')
if _validate_emnist(emnist_dir):
print("EMNIST data seems to be present already.")
else:
print("EMNIST data not found, downloading and processing...")
try:
shutil.rmtree(emnist_dir)
except FileNotFoundError:
pass
raw_dir = _download_emnist(data_dir)
with cd(raw_dir):
images, labels = _emnist_load_helper(emnist_gz_names[0], emnist_gz_names[1])
images1, labels1 = _emnist_load_helper(emnist_gz_names[2], emnist_gz_names[3])
with cd(data_dir):
os.makedirs('emnist', exist_ok=False)
print("Processing...")
with cd('emnist'):
x = np.concatenate((images, images1), 0)
y = np.concatenate((labels, labels1), 0)
# Give images the right orientation so that plt.imshow(x[0]) just works.
x = np.moveaxis(x.reshape(-1, 28, 28), 1, 2)
for i in sorted(set(y.flatten())):
keep = y == i
x_i = x[keep.flatten(), :]
if i >= 36:
char = chr(i-36+ord('a'))
elif i >= 10:
char = chr(i-10+ord('A'))
else:
char = str(i)
if quiet >= 2:
pass
elif quiet == 1:
print(char)
elif quiet <= 0:
print(char)
print(image_to_string(x_i[0, ...]))
file_i = char + '.pklz'
with gzip.open(file_i, 'wb') as f:
dill.dump(x_i, f, protocol=dill.HIGHEST_PROTOCOL)
if shape is not None:
maybe_convert_emnist_shape(data_dir, shape)
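# --- Illustrative sketch (not part of the original module) -------------------
# Reading one of the per-class files produced above directly; the character
# and data directory below are assumptions.
def _example_read_class(char='A', data_dir='/tmp/dps_data'):
    with gzip.open(os.path.join(data_dir, 'emnist', char + '.pklz'), 'rb') as f:
        # uint8 array with shape (n_images_in_class, 28, 28)
        return dill.load(f)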
def load_emnist(
classes, balance=False, include_blank=False,
shape=None, n_examples=None, example_range=None, show=False, path=None):
""" Load emnist data from disk by class.
Elements of `classes` pick out which emnist classes to load, but different labels
end up getting returned because most classifiers require that the labels
    be in range(len(classes)). The sorted list of class names is returned alongside
    the data so that integer labels can be mapped back to the original classes.
Pixel values of returned images are integers in the range 0-255, but stored as float32.
Returned X array has shape (n_images,) + shape.
Parameters
----------
path: str
Path to data directory, assumed to contain a sub-directory called `emnist`.
classes: list of character from the set (0-9, A-Z, a-z)
Each character is the name of a class to load.
balance: bool
If True, will ensure that all classes are balanced by removing elements
        from classes that are larger than the minimum-size class.
include_blank: bool
If True, includes an additional class that consists of blank images.
shape: (int, int)
Shape of the images.
n_examples: int
Maximum number of examples returned. If not supplied, return all available data.
example_range: pair of floats
Pair of floats specifying, for each class, the range of examples that should be used.
Each element of the pair is a number in (0, 1), and the second number should be larger.
show: bool
If True, prints out an image from each class.
"""
if path is None:
path = cfg.data_dir
maybe_download_emnist(path, shape=shape)
emnist_dir = os.path.join(path, 'emnist')
classes = list(classes) + []
needs_reshape = False
if shape and shape != (28, 28):
resized_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*shape))
if _validate_emnist(resized_dir):
emnist_dir = resized_dir
else:
needs_reshape = True
if example_range is not None:
assert 0.0 <= example_range[0] < example_range[1] <= 1.0
x, y = [], []
class_count = []
classes = sorted([str(s) for s in classes])
for i, cls in enumerate(classes):
with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
_x = dill.load(f)
if example_range is not None:
low = int(example_range[0] * len(_x))
high = int(example_range[1] * len(_x))
_x = _x[low:high, ...]
x.append(_x)
y.extend([i] * _x.shape[0])
if show:
print(cls)
indices_to_show = np.random.choice(len(_x), size=100)
for i in indices_to_show:
print(image_to_string(_x[i]))
class_count.append(_x.shape[0])
x = np.concatenate(x, axis=0)
if include_blank:
min_class_count = min(class_count)
blanks = np.zeros((min_class_count,) + x.shape[1:], dtype=np.uint8)
x = np.concatenate((x, blanks), axis=0)
blank_idx = len(classes)
y.extend([blank_idx] * min_class_count)
blank_symbol = ' '
classes.append(blank_symbol)
y = np.array(y)
if balance:
min_class_count = min(class_count)
keep_x, keep_y = [], []
for i, cls in enumerate(classes):
keep_indices = np.nonzero(y == i)[0]
keep_indices = keep_indices[:min_class_count]
keep_x.append(x[keep_indices])
keep_y.append(y[keep_indices])
x = np.concatenate(keep_x, axis=0)
y = np.concatenate(keep_y, axis=0)
order = np.random.permutation(x.shape[0])
x = x[order]
y = y[order]
if n_examples:
x = x[:n_examples]
y = y[:n_examples]
if needs_reshape:
if x.shape[0] > 10000:
warnings.warn(
"Performing an online resize of a large number of images ({}), "
"consider creating and storing the resized dataset.".format(x.shape[0])
)
x = [resize_image(img, shape) for img in x]
x = np.uint8(x)
if show:
indices_to_show = np.random.choice(len(x), size=200)
for i in indices_to_show:
print(y[i])
print(image_to_string(x[i]))
return x, y, classes
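# --- Usage sketch (illustrative; the class list and data directory are -------
# assumptions) -----------------------------------------------------------------
def _example_load_digits():
    # Load a balanced digits-only subset at 14x14 with an extra blank class;
    # labels come back in range(len(classes)) as described above.
    x, y, classes = load_emnist(
        classes=[str(i) for i in range(10)],
        balance=True, include_blank=True, shape=(14, 14),
        n_examples=1000, path='/tmp/dps_data')
    return x, y, classes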
| e2crawfo/dps | dps/datasets/load/emnist.py | Python | apache-2.0 | 11,445 |
"""
pygments.lexers.jvm
~~~~~~~~~~~~~~~~~~~
Pygments lexers for JVM languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import shebang_matches
from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'ClojureScriptLexer',
'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer',
'PigLexer', 'GoloLexer', 'JasminLexer', 'SarlLexer']
class JavaLexer(RegexLexer):
"""
For `Java <https://www.oracle.com/technetwork/java/>`_ source code.
"""
name = 'Java'
aliases = ['java']
filenames = ['*.java']
mimetypes = ['text/x-java']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
# keywords: go before method names to avoid lexing "throw new XYZ"
# as a method signature
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
Keyword),
# method names
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'@[^\W\d][\w.]*', Name.Decorator),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(var)(\s+)', bygroups(Keyword.Declaration, Text),
'var'),
(r'(import(?:\s+static)?)(\s+)', bygroups(Keyword.Namespace, Text),
'import'),
(r'"', String, 'string'),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation,
Name.Attribute)),
(r'^(\s*)(default)(:)', bygroups(Text, Keyword, Punctuation)),
(r'^(\s*)((?:[^\W\d]|\$)[\w$]*)(:)', bygroups(Text, Name.Label,
Punctuation)),
(r'([^\W\d]|\$)[\w$]*', Name),
(r'([0-9][0-9_]*\.([0-9][0-9_]*)?|'
r'\.[0-9][0-9_]*)'
r'([eE][+\-]?[0-9][0-9_]*)?[fFdD]?|'
r'[0-9][eE][+\-]?[0-9][0-9_]*[fFdD]?|'
r'[0-9]([eE][+\-]?[0-9][0-9_]*)?[fFdD]|'
r'0[xX]([0-9a-fA-F][0-9a-fA-F_]*\.?|'
r'([0-9a-fA-F][0-9a-fA-F_]*)?\.[0-9a-fA-F][0-9a-fA-F_]*)'
r'[pP][+\-]?[0-9][0-9_]*[fFdD]?', Number.Float),
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*[lL]?', Number.Hex),
(r'0[bB][01][01_]*[lL]?', Number.Bin),
(r'0[0-7_]+[lL]?', Number.Oct),
(r'0|[1-9][0-9_]*[lL]?', Number.Integer),
(r'[~^*!%&\[\]<>|+=/?-]', Operator),
(r'[{}();:.,]', Punctuation),
(r'\n', Text)
],
'class': [
(r'([^\W\d]|\$)[\w$]*', Name.Class, '#pop')
],
'var': [
(r'([^\W\d]|\$)[\w$]*', Name, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
'string': [
(r'[^\\"]+', String),
(r'\\\\', String), # Escaped backslash
(r'\\"', String), # Escaped quote
(r'\\', String), # Bare backslash
(r'"', String, '#pop'), # Closing quote
],
}
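# --- Illustrative sketch (not part of pygments itself) -----------------------
# Driving the lexer above directly; the code snippet and formatter choice are
# assumptions.
def _example_highlight_java():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = 'class Hello { public static void main(String[] args) { } }'
    return highlight(code, JavaLexer(), TerminalFormatter())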
class AspectJLexer(JavaLexer):
"""
For `AspectJ <http://www.eclipse.org/aspectj/>`_ source code.
.. versionadded:: 1.6
"""
name = 'AspectJ'
aliases = ['aspectj']
filenames = ['*.aj']
mimetypes = ['text/x-aspectj']
aj_keywords = {
'aspect', 'pointcut', 'privileged', 'call', 'execution',
'initialization', 'preinitialization', 'handler', 'get', 'set',
'staticinitialization', 'target', 'args', 'within', 'withincode',
'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around',
'proceed', 'throwing', 'returning', 'adviceexecution', 'declare',
'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint',
'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart',
'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow',
'pertypewithin', 'lock', 'unlock', 'thisAspectInstance'
}
aj_inter_type = {'parents:', 'warning:', 'error:', 'soft:', 'precedence:'}
aj_inter_type_annotation = {'@type', '@method', '@constructor', '@field'}
def get_tokens_unprocessed(self, text):
for index, token, value in JavaLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.aj_keywords:
yield index, Keyword, value
elif token is Name.Label and value in self.aj_inter_type:
yield index, Keyword, value[:-1]
yield index, Operator, value[-1]
elif token is Name.Decorator and value in self.aj_inter_type_annotation:
yield index, Keyword, value
else:
yield index, token, value
class ScalaLexer(RegexLexer):
"""
For `Scala <http://www.scala-lang.org>`_ source code.
"""
name = 'Scala'
aliases = ['scala']
filenames = ['*.scala']
mimetypes = ['text/x-scala']
flags = re.MULTILINE | re.DOTALL
opchar = '[!#%&*\\-\\/:?@^' + uni.combine('Sm', 'So') + ']'
letter = '[_\\$' + uni.combine('Ll', 'Lu', 'Lo', 'Nl', 'Lt') + ']'
upperLetter = '[' + uni.combine('Lu', 'Lt') + ']'
letterOrDigit = '(?:%s|[0-9])' % letter
letterOrDigitNoDollarSign = '(?:%s|[0-9])' % letter.replace('\\$', '')
alphaId = '%s+' % letter
simpleInterpolatedVariable = '%s%s*' % (letter, letterOrDigitNoDollarSign)
idrest = '%s%s*(?:(?<=_)%s+)?' % (letter, letterOrDigit, opchar)
idUpper = '%s%s*(?:(?<=_)%s+)?' % (upperLetter, letterOrDigit, opchar)
plainid = '(?:%s|%s+)' % (idrest, opchar)
backQuotedId = r'`[^`]+`'
anyId = r'(?:%s|%s)' % (plainid, backQuotedId)
notStartOfComment = r'(?!//|/\*)'
endOfLineMaybeWithComment = r'(?=\s*(//|$))'
keywords = (
'new', 'return', 'throw', 'classOf', 'isInstanceOf', 'asInstanceOf',
'else', 'if', 'then', 'do', 'while', 'for', 'yield', 'match', 'case',
'catch', 'finally', 'try'
)
operators = (
'<%', '=:=', '<:<', '<%<', '>:', '<:', '=', '==', '!=', '<=', '>=',
'<>', '<', '>', '<-', '←', '->', '→', '=>', '⇒', '?', '@', '|', '-',
'+', '*', '%', '~', '\\'
)
storage_modifiers = (
'private', 'protected', 'synchronized', '@volatile', 'abstract',
'final', 'lazy', 'sealed', 'implicit', 'override', '@transient',
'@native'
)
tokens = {
'root': [
include('whitespace'),
include('comments'),
include('script-header'),
include('imports'),
include('exports'),
include('storage-modifiers'),
include('annotations'),
include('using'),
include('declarations'),
include('inheritance'),
include('extension'),
include('end'),
include('constants'),
include('strings'),
include('symbols'),
include('singleton-type'),
include('inline'),
include('quoted'),
include('keywords'),
include('operators'),
include('punctuation'),
include('names'),
],
# Includes:
'whitespace': [
(r'\s+', Text),
],
'comments': [
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
],
'script-header': [
(r'^#!([^\n]*)$', Comment.Hashbang),
],
'imports': [
(r'\b(import)(\s+)', bygroups(Keyword, Text), 'import-path'),
],
'exports': [
(r'\b(export)(\s+)(given)(\s+)',
bygroups(Keyword, Text, Keyword, Text), 'export-path'),
(r'\b(export)(\s+)', bygroups(Keyword, Text), 'export-path'),
],
'storage-modifiers': [
(words(storage_modifiers, prefix=r'\b', suffix=r'\b'), Keyword),
# Only highlight soft modifiers if they are eventually followed by
# the correct keyword. Note that soft modifiers can be followed by a
# sequence of regular modifiers; [a-z\s]* skips those, and we just
# check that the soft modifier is applied to a supported statement.
(r'\b(transparent|opaque|infix|open|inline)\b(?=[a-z\s]*\b'
r'(def|val|var|given|type|class|trait|object|enum)\b)', Keyword),
],
'annotations': [
(r'@%s' % idrest, Name.Decorator),
],
'using': [
# using is a soft keyword, can only be used in the first position of
# a parameter or argument list.
(r'(\()(\s*)(using)(\s)', bygroups(Punctuation, Text, Keyword, Text)),
],
'declarations': [
(r'\b(def)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
bygroups(Keyword, Text, Name.Function)),
(r'\b(trait)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
bygroups(Keyword, Text, Name.Class)),
(r'\b(?:(case)(\s+))?(class|object|enum)\b(\s*)%s(%s)?' %
(notStartOfComment, anyId),
bygroups(Keyword, Text, Keyword, Text, Name.Class)),
(r'(?<!\.)\b(type)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
bygroups(Keyword, Text, Name.Class)),
(r'\b(val|var)\b', Keyword.Declaration),
(r'\b(package)(\s+)(object)\b(\s*)%s(%s)?' %
(notStartOfComment, anyId),
bygroups(Keyword, Text, Keyword, Text, Name.Namespace)),
(r'\b(package)(\s+)', bygroups(Keyword, Text), 'package'),
(r'\b(given)\b(\s*)(%s)' % idUpper,
bygroups(Keyword, Text, Name.Class)),
(r'\b(given)\b(\s*)(%s)?' % anyId,
bygroups(Keyword, Text, Name)),
],
'inheritance': [
(r'\b(extends|with|derives)\b(\s*)'
r'(%s|%s|(?=\([^\)]+=>)|(?=%s)|(?="))?' %
(idUpper, backQuotedId, plainid),
bygroups(Keyword, Text, Name.Class)),
],
'extension': [
(r'\b(extension)(\s+)(?=[\[\(])', bygroups(Keyword, Text)),
],
'end': [
# end is a soft keyword, should only be highlighted in certain cases
(r'\b(end)(\s+)(if|while|for|match|new|extension|val|var)\b',
bygroups(Keyword, Text, Keyword)),
(r'\b(end)(\s+)(%s)%s' % (idUpper, endOfLineMaybeWithComment),
bygroups(Keyword, Text, Name.Class)),
(r'\b(end)(\s+)(%s|%s)?%s' %
(backQuotedId, plainid, endOfLineMaybeWithComment),
bygroups(Keyword, Text, Name.Namespace)),
],
'punctuation': [
(r'[{}()\[\];,.]', Punctuation),
(r'(?<!:):(?!:)', Punctuation),
],
'keywords': [
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
],
'operators': [
(r'(%s{2,})(\s+)' % opchar, bygroups(Operator, Text)),
(r'/(?![/*])', Operator),
(words(operators), Operator),
(r'(?<!%s)(!|&&|\|\|)(?!%s)' % (opchar, opchar), Operator),
],
'constants': [
(r'\b(this|super)\b', Name.Builtin.Pseudo),
(r'(true|false|null)\b', Keyword.Constant),
(r'0[xX][0-9a-fA-F_]*', Number.Hex),
(r'([0-9][0-9_]*\.[0-9][0-9_]*|\.[0-9][0-9_]*)'
r'([eE][+-]?[0-9][0-9_]*)?[fFdD]?', Number.Float),
(r'[0-9]+([eE][+-]?[0-9]+)?[fFdD]', Number.Float),
(r'[0-9]+([eE][+-]?[0-9]+)[fFdD]?', Number.Float),
(r'[0-9]+[lL]', Number.Integer.Long),
(r'[0-9]+', Number.Integer),
(r'""".*?"""(?!")', String),
(r'"(\\\\|\\"|[^"])*"', String),
(r"(')(\\.)(')", bygroups(String.Char, String.Escape, String.Char)),
(r"'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
],
"strings": [
(r'[fs]"""', String, 'interpolated-string-triple'),
(r'[fs]"', String, 'interpolated-string'),
(r'raw"(\\\\|\\"|[^"])*"', String),
],
'symbols': [
(r"('%s)(?!')" % plainid, String.Symbol),
],
'singleton-type': [
(r'(\.)(type)\b', bygroups(Punctuation, Keyword)),
],
'inline': [
            # inline is a soft modifier, only highlighted if followed by if,
# match or parameters.
(r'\b(inline)(?=\s+(%s|%s)\s*:)' % (plainid, backQuotedId),
Keyword),
(r'\b(inline)\b(?=(?:.(?!\b(?:val|def|given)\b))*\b(if|match)\b)',
Keyword),
],
'quoted': [
# '{...} or ${...}
(r"['$]\{(?!')", Punctuation),
# '[...]
(r"'\[(?!')", Punctuation),
],
'names': [
(idUpper, Name.Class),
(anyId, Name),
],
# States
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'import-path': [
(r'(?<=[\n;:])', Text, '#pop'),
include('comments'),
(r'\b(given)\b', Keyword),
include('qualified-name'),
(r'\{', Punctuation, 'import-path-curly-brace'),
],
'import-path-curly-brace': [
include('whitespace'),
include('comments'),
(r'\b(given)\b', Keyword),
(r'=>', Operator),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation),
(r'[\[\]]', Punctuation),
include('qualified-name'),
],
'export-path': [
(r'(?<=[\n;:])', Text, '#pop'),
include('comments'),
include('qualified-name'),
(r'\{', Punctuation, 'export-path-curly-brace'),
],
'export-path-curly-brace': [
include('whitespace'),
include('comments'),
(r'=>', Operator),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation),
include('qualified-name'),
],
'package': [
(r'(?<=[\n;])', Text, '#pop'),
(r':', Punctuation, '#pop'),
include('comments'),
include('qualified-name'),
],
'interpolated-string-triple': [
(r'"""(?!")', String, '#pop'),
(r'"', String),
include('interpolated-string-common'),
],
'interpolated-string': [
(r'"', String, '#pop'),
include('interpolated-string-common'),
],
'interpolated-string-brace': [
(r'\}', String.Interpol, '#pop'),
(r'\{', Punctuation, 'interpolated-string-nested-brace'),
include('root'),
],
'interpolated-string-nested-brace': [
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
include('root'),
],
# Helpers
'qualified-name': [
(idUpper, Name.Class),
(r'(%s)(\.)' % anyId, bygroups(Name.Namespace, Punctuation)),
(r'\.', Punctuation),
(anyId, Name),
(r'[^\S\n]+', Text),
],
'interpolated-string-common': [
(r'[^"$\\]+', String),
(r'\$\$', String.Escape),
(r'(\$)(%s)' % simpleInterpolatedVariable,
bygroups(String.Interpol, Name)),
(r'\$\{', String.Interpol, 'interpolated-string-brace'),
(r'\\.', String),
],
}
class GosuLexer(RegexLexer):
"""
For Gosu source code.
.. versionadded:: 1.5
"""
name = 'Gosu'
aliases = ['gosu']
filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
mimetypes = ['text/x-gosu']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # modifiers etc.
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
r'index|while|do|continue|break|return|try|catch|finally|this|'
r'throw|new|switch|case|default|eval|super|outer|classpath|'
r'using)\b', Keyword),
(r'(var|delegate|construct|function|private|internal|protected|'
r'public|abstract|override|final|static|extends|transient|'
r'implements|represents|readonly)\b', Keyword.Declaration),
(r'(property\s+)(get|set)?', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
(r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Text, Name.Class)),
(r'(uses)(\s+)([\w.]+\*?)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'"', String, 'string'),
(r'(\??[.#])([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'(:)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_$]\w*', Name),
(r'and|or|not|[\\~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\n', Text)
],
'templateText': [
(r'(\\<)|(\\\$)', String),
(r'(<%@\s+)(extends|params)',
bygroups(Operator, Name.Decorator), 'stringTemplate'),
(r'<%!--.*?--%>', Comment.Multiline),
(r'(<%)|(<%=)', Operator, 'stringTemplate'),
(r'\$\{', Operator, 'stringTemplateShorthand'),
(r'.', String)
],
'string': [
(r'"', String, '#pop'),
include('templateText')
],
'stringTemplate': [
(r'"', String, 'string'),
(r'%>', Operator, '#pop'),
include('root')
],
'stringTemplateShorthand': [
(r'"', String, 'string'),
(r'\{', Operator, 'stringTemplateShorthand'),
(r'\}', Operator, '#pop'),
include('root')
],
}
class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
.. versionadded:: 1.5
"""
name = 'Gosu Template'
aliases = ['gst']
filenames = ['*.gst']
mimetypes = ['text/x-gosu-template']
def get_tokens_unprocessed(self, text):
lexer = GosuLexer()
stack = ['templateText']
yield from lexer.get_tokens_unprocessed(text, stack)
class GroovyLexer(RegexLexer):
"""
For `Groovy <http://groovy.codehaus.org/>`_ source code.
.. versionadded:: 1.5
"""
name = 'Groovy'
aliases = ['groovy']
filenames = ['*.groovy','*.gradle']
mimetypes = ['text/x-groovy']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# Groovy allows a file to start with a shebang
(r'#!(.*?)$', Comment.Preproc, 'base'),
default('base'),
],
'base': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
# keywords: go before method names to avoid lexing "throw new XYZ"
# as a method signature
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b',
Keyword),
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'('
r'[a-zA-Z_]\w*' # method name
r'|"(?:\\\\|\\[^\\]|[^"\\])*"' # or double-quoted method name
r"|'(?:\\\\|\\[^\\]|[^'\\])*'" # or single-quoted method name
r')'
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(def|boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'""".*?"""', String.Double),
(r"'''.*?'''", String.Single),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'\$/((?!/\$).)*/\$', String),
(r'/(\\\\|\\[^\\]|[^/\\])*/', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
def analyse_text(text):
return shebang_matches(text, r'groovy')
class IokeLexer(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
.. versionadded:: 1.4
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#\{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'\}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
# Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
# Symbols
(r':[\w!:?]+', String.Symbol),
(r'[\w!:?]+:(?![\w!?])', String.Other),
(r':"(\\\\|\\[^\\]|[^"\\])*"', String.Symbol),
# Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())\s*"', String.Doc, 'documentation'),
# Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
# Mimic
(r'\w[\w!:?]+(?=\s*=.*mimic\s)', Name.Entity),
# Assignment
(r'[a-zA-Z_][\w!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![\w!:?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![\w!:?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![\w!:?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![\w!:?])', Keyword),
# DefaultBehaviour Literals
(r'(dict|list|message|set)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![\w!:?])', Keyword),
# DefaultBehaviour Aspects
(r'(after|around|before)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![\w!:?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
# DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![\w!:?])', Keyword),
# DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![\w!:?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![\w!:?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![\w!:?])', Name.Builtin),
# functions
('(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
'(?![\\w!:?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
r'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![\w!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|\{|\})', Punctuation),
# kinds
(r'[A-Z][\w!:?]*', Name.Class),
# default cellnames
(r'[a-z_][\w!:?]*', Name)
]
}
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
.. versionadded:: 0.11
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
special_forms = (
'.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop'
)
# It's safe to consider 'ns' a declaration thing because it defines a new
# namespace.
declarations = (
'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod',
'defstruct', 'defonce', 'declare', 'definline', 'definterface',
'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns'
)
builtins = (
'*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for',
'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper')
# valid names for identifiers
    # well, a name just can't consist entirely of numbers,
# but this should be good enough for now
# TODO / should divide keywords/symbols into namespace/rest
# but that's hard, so just pretend / is part of the name
valid_name = r'(?!#)[\w!$%*+<=>?/.#|-]+'
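    # Illustrative examples, not part of the original lexer: under this pattern,
    # tokens such as map->vec, +, set! and clojure.core/filter each scan as a
    # single name, since -, >, !, . and / are all treated as name characters here.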
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[abcdef\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
# keywords
(r'::?#?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
# highlight the special forms
(words(special_forms, suffix=' '), Keyword),
# Technically, only the special forms are 'keywords'. The problem
# is that only treating them as keywords means that things like
# 'defn' and 'ns' need to be highlighted as builtins. This is ugly
# and weird for most styles. So, as a compromise we're going to
# highlight them as Keyword.Declarations.
(words(declarations, suffix=' '), Keyword.Declaration),
# highlight the builtins
(words(builtins, suffix=' '), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class ClojureScriptLexer(ClojureLexer):
"""
Lexer for `ClojureScript <http://clojure.org/clojurescript>`_
source code.
.. versionadded:: 2.0
"""
name = 'ClojureScript'
aliases = ['clojurescript', 'cljs']
filenames = ['*.cljs']
mimetypes = ['text/x-clojurescript', 'application/x-clojurescript']
class TeaLangLexer(RegexLexer):
"""
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w\.]*', Name.Decorator),
(r'(and|break|else|foreach|if|in|not|or|reverse)\b',
Keyword),
(r'(as|call|define)\b', Keyword.Declaration),
(r'(true|false|null)\b', Keyword.Constant),
(r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_\$]\w*', Name),
(r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'template': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
class CeylonLexer(RegexLexer):
"""
For `Ceylon <http://ceylon-lang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'Ceylon'
aliases = ['ceylon']
filenames = ['*.ceylon']
mimetypes = ['text/x-ceylon']
flags = re.MULTILINE | re.DOTALL
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'(shared|abstract|formal|default|actual|variable|deprecated|small|'
r'late|literal|doc|by|see|throws|optional|license|tagged|final|native|'
r'annotation|sealed)\b', Name.Decorator),
(r'(break|case|catch|continue|else|finally|for|in|'
r'if|return|switch|this|throw|try|while|is|exists|dynamic|'
r'nonempty|then|outer|assert|let)\b', Keyword),
(r'(abstracts|extends|satisfies|'
r'super|given|of|out|assign)\b', Keyword.Declaration),
(r'(function|value|void|new)\b',
Keyword.Type),
(r'(assembly|module|package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface|object|alias)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex),
(r'#[0-9a-fA-F]+', Number.Hex),
(r'\$([01]{4})(_[01]{4})+', Number.Bin),
(r'\$[01]+', Number.Bin),
(r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer),
(r'[0-9]+[kMGTP]?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[A-Za-z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[a-z][\w.]*',
Name.Namespace, '#pop')
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
class KotlinLexer(RegexLexer):
"""
For `Kotlin <http://kotlinlang.org/>`_
source code.
.. versionadded:: 1.5
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt', '*.kts']
mimetypes = ['text/x-kotlin']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
kt_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc') + ']*')
kt_space_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc', 'Zs')
+ r'\'~!%^&*()+=|\[\]:;,.<>/\?-]*')
kt_id = '(' + kt_name + '|`' + kt_space_name + '`)'
modifiers = (r'actual|abstract|annotation|companion|const|crossinline|'
r'data|enum|expect|external|final|infix|inline|inner|'
r'internal|lateinit|noinline|open|operator|override|private|'
r'protected|public|sealed|suspend|tailrec')
tokens = {
'root': [
# Whitespaces
(r'[^\S\n]+', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'\n', Text),
# Comments
(r'//.*?\n', Comment.Single),
(r'^#!/.+?\n', Comment.Single), # shebang for kotlin scripts
(r'/[*].*?[*]/', Comment.Multiline),
# Keywords
(r'as\?', Keyword),
(r'(as|break|by|catch|constructor|continue|do|dynamic|else|finally|'
r'get|for|if|init|[!]*in|[!]*is|out|reified|return|set|super|this|'
r'throw|try|typealias|typeof|vararg|when|where|while)\b', Keyword),
(r'it\b', Name.Builtin),
# Built-in types
(words(('Boolean?', 'Byte?', 'Char?', 'Double?', 'Float?',
'Int?', 'Long?', 'Short?', 'String?', 'Any?', 'Unit?')), Keyword.Type),
(words(('Boolean', 'Byte', 'Char', 'Double', 'Float',
'Int', 'Long', 'Short', 'String', 'Any', 'Unit'), suffix=r'\b'), Keyword.Type),
# Constants
(r'(true|false|null)\b', Keyword.Constant),
# Imports
(r'(package|import)(\s+)(\S+)', bygroups(Keyword, Text, Name.Namespace)),
# Dot access
(r'(\?\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)),
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation, Name.Attribute)),
# Annotations
(r'@[^\W\d][\w.]*', Name.Decorator),
# Labels
(r'[^\W\d][\w.]+@', Name.Decorator),
# Object expression
(r'(object)(\s+)(:)(\s+)', bygroups(Keyword, Text, Punctuation, Text), 'class'),
# Types
(r'((?:(?:' + modifiers + r'|fun)\s+)*)(class|interface|object)(\s+)',
bygroups(using(this, state='modifiers'), Keyword.Declaration, Text), 'class'),
# Variables
(r'(var|val)(\s+)(\()', bygroups(Keyword.Declaration, Text, Punctuation),
'destructuring_assignment'),
(r'((?:(?:' + modifiers + r')\s+)*)(var|val)(\s+)',
bygroups(using(this, state='modifiers'), Keyword.Declaration, Text), 'variable'),
# Functions
(r'((?:(?:' + modifiers + r')\s+)*)(fun)(\s+)',
bygroups(using(this, state='modifiers'), Keyword.Declaration, Text), 'function'),
# Operators
(r'::|!!|\?[:.]', Operator),
(r'[~^*!%&\[\]<>|+=/?-]', Operator),
# Punctuation
(r'[{}();:.,]', Punctuation),
# Strings
(r'"""', String, 'multiline_string'),
(r'"', String, 'string'),
(r"'\\.'|'[^\\]'", String.Char),
# Numbers
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
# Identifiers
(r'' + kt_id + r'((\?[^.])?)', Name) # additionally handle nullable types
],
'class': [
(kt_id, Name.Class, '#pop')
],
'variable': [
(kt_id, Name.Variable, '#pop')
],
'destructuring_assignment': [
(r',', Punctuation),
(r'\s+', Text),
(kt_id, Name.Variable),
(r'(:)(\s+)(' + kt_id + ')', bygroups(Punctuation, Text, Name)),
(r'<', Operator, 'generic'),
(r'\)', Punctuation, '#pop')
],
'function': [
(r'<', Operator, 'generic'),
(r'' + kt_id + r'(\.)' + kt_id, bygroups(Name, Punctuation, Name.Function), '#pop'),
(kt_id, Name.Function, '#pop')
],
'generic': [
(r'(>)(\s*)', bygroups(Operator, Text), '#pop'),
(r':', Punctuation),
(r'(reified|out|in)\b', Keyword),
(r',', Punctuation),
(r'\s+', Text),
(kt_id, Name)
],
'modifiers': [
(r'\w+', Keyword.Declaration),
(r'\s+', Text),
default('#pop')
],
'string': [
(r'"', String, '#pop'),
include('string_common')
],
'multiline_string': [
(r'"""', String, '#pop'),
(r'"', String),
include('string_common')
],
'string_common': [
(r'\\\\', String), # escaped backslash
(r'\\"', String), # escaped quote
(r'\\', String), # bare backslash
(r'\$\{', String.Interpol, 'interpolation'),
(r'(\$)(\w+)', bygroups(String.Interpol, Name)),
(r'[^\\"$]+', String)
],
'interpolation': [
(r'"', String),
(r'\$\{', String.Interpol, 'interpolation'),
(r'\{', Punctuation, 'scope'),
(r'\}', String.Interpol, '#pop'),
include('root')
],
'scope': [
(r'\{', Punctuation, 'scope'),
(r'\}', Punctuation, '#pop'),
include('root')
]
}
class XtendLexer(RegexLexer):
"""
For `Xtend <http://xtend-lang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'Xtend'
aliases = ['xtend']
filenames = ['*.xtend']
mimetypes = ['text/x-xtend']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|'
r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b',
Keyword),
(r'(def|abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r"(''')", String, 'template'),
(r'(\u00BB)', String, 'template'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
'template': [
(r"'''", String, '#pop'),
(r'\u00AB', String, '#pop'),
(r'.', String)
],
}
class PigLexer(RegexLexer):
"""
For `Pig Latin <https://pig.apache.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pig'
aliases = ['pig']
filenames = ['*.pig']
mimetypes = ['text/x-pig']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'\\\n', Text),
(r'\\', Text),
(r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
include('keywords'),
include('types'),
include('builtins'),
include('punct'),
include('operators'),
(r'[0-9]*\.[0-9]+(e[0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text),
(r'([a-z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[()#:]', Text),
(r'[^(:#\'")\s]+', Text),
(r'\S+\s+', Text) # TODO: make tests pass without \s+
],
'keywords': [
(r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
r'eval|exex|explain|filter|flatten|foreach|full|generate|group|'
r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
r'stream|through|union|using|void)\b', Keyword)
],
'builtins': [
(r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
r'TOKENIZE)\b', Name.Builtin)
],
'types': [
(r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
r'int|long|tuple)\b', Keyword.Type)
],
'punct': [
(r'[;(){}\[\]]', Punctuation),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
class GoloLexer(RegexLexer):
"""
For `Golo <http://golo-lang.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Golo'
filenames = ['*.golo']
aliases = ['golo']
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'(\^|\.\.\.|:|\?:|->|==|!=|=|\+|\*|%|/|<=|<|>=|>|=|\.)',
Operator),
(r'(?<=[^-])(-)(?=[^-])', Operator),
(r'(?<=[^`])(is|isnt|and|or|not|oftype|in|orIfNull)\b', Operator.Word),
(r'[]{}|(),[]', Punctuation),
(r'(module|import)(\s+)',
bygroups(Keyword.Namespace, Text),
'modname'),
(r'\b([a-zA-Z_][\w$.]*)(::)', bygroups(Name.Namespace, Punctuation)),
(r'\b([a-zA-Z_][\w$]*(?:\.[a-zA-Z_][\w$]*)+)\b', Name.Namespace),
(r'(let|var)(\s+)',
bygroups(Keyword.Declaration, Text),
'varname'),
(r'(struct)(\s+)',
bygroups(Keyword.Declaration, Text),
'structname'),
(r'(function)(\s+)',
bygroups(Keyword.Declaration, Text),
'funcname'),
(r'(null|true|false)\b', Keyword.Constant),
(r'(augment|pimp'
r'|if|else|case|match|return'
r'|case|when|then|otherwise'
r'|while|for|foreach'
r'|try|catch|finally|throw'
r'|local'
r'|continue|break)\b', Keyword),
(r'(map|array|list|set|vector|tuple)(\[)',
bygroups(Name.Builtin, Punctuation)),
(r'(print|println|readln|raise|fun'
r'|asInterfaceInstance)\b', Name.Builtin),
(r'(`?[a-zA-Z_][\w$]*)(\()',
bygroups(Name.Function, Punctuation)),
(r'-?[\d_]*\.[\d_]*([eE][+-]?\d[\d_]*)?F?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'-?\d[\d_]*L', Number.Integer.Long),
(r'-?\d[\d_]*', Number.Integer),
(r'`?[a-zA-Z_][\w$]*', Name),
(r'@[a-zA-Z_][\w$.]*', Name.Decorator),
(r'"""', String, combined('stringescape', 'triplestring')),
(r'"', String, combined('stringescape', 'doublestring')),
(r"'", String, combined('stringescape', 'singlestring')),
(r'----((.|\n)*?)----', String.Doc)
],
'funcname': [
(r'`?[a-zA-Z_][\w$]*', Name.Function, '#pop'),
],
'modname': [
(r'[a-zA-Z_][\w$.]*\*?', Name.Namespace, '#pop')
],
'structname': [
(r'`?[\w.]+\*?', Name.Class, '#pop')
],
'varname': [
(r'`?[a-zA-Z_][\w$]*', Name.Variable, '#pop'),
],
'string': [
(r'[^\\\'"\n]+', String),
(r'[\'"\\]', String)
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'triplestring': [
(r'"""', String, '#pop'),
include('string'),
(r'\n', String),
],
'doublestring': [
(r'"', String.Double, '#pop'),
include('string'),
],
'singlestring': [
(r"'", String, '#pop'),
include('string'),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
class JasminLexer(RegexLexer):
"""
For `Jasmin <http://jasmin.sourceforge.net/>`_ assembly code.
.. versionadded:: 2.0
"""
name = 'Jasmin'
aliases = ['jasmin', 'jasminxt']
filenames = ['*.j']
_whitespace = r' \n\t\r'
_ws = r'(?:[%s]+)' % _whitespace
_separator = r'%s:=' % _whitespace
_break = r'(?=[%s]|$)' % _separator
_name = r'[^%s]+' % _separator
_unqualified_name = r'(?:[^%s.;\[/]+)' % _separator
tokens = {
'default': [
(r'\n', Text, '#pop'),
(r"'", String.Single, ('#pop', 'quote')),
(r'"', String.Double, 'string'),
(r'=', Punctuation),
(r':', Punctuation, 'label'),
(_ws, Text),
(r';.*', Comment.Single),
(r'(\$[-+])?0x-?[\da-fA-F]+%s' % _break, Number.Hex),
(r'(\$[-+]|\+)?-?\d+%s' % _break, Number.Integer),
(r'-?(\d+\.\d*|\.\d+)([eE][-+]?\d+)?[fFdD]?'
r'[\x00-\x08\x0b\x0c\x0e-\x1f]*%s' % _break, Number.Float),
(r'\$%s' % _name, Name.Variable),
# Directives
(r'\.annotation%s' % _break, Keyword.Reserved, 'annotation'),
(r'(\.attribute|\.bytecode|\.debug|\.deprecated|\.enclosing|'
r'\.interface|\.line|\.signature|\.source|\.stack|\.var|abstract|'
r'annotation|bridge|class|default|enum|field|final|fpstrict|'
r'interface|native|private|protected|public|signature|static|'
r'synchronized|synthetic|transient|varargs|volatile)%s' % _break,
Keyword.Reserved),
(r'\.catch%s' % _break, Keyword.Reserved, 'caught-exception'),
(r'(\.class|\.implements|\.inner|\.super|inner|invisible|'
r'invisibleparam|outer|visible|visibleparam)%s' % _break,
Keyword.Reserved, 'class/convert-dots'),
(r'\.field%s' % _break, Keyword.Reserved,
('descriptor/convert-dots', 'field')),
(r'(\.end|\.limit|use)%s' % _break, Keyword.Reserved,
'no-verification'),
(r'\.method%s' % _break, Keyword.Reserved, 'method'),
(r'\.set%s' % _break, Keyword.Reserved, 'var'),
(r'\.throws%s' % _break, Keyword.Reserved, 'exception'),
(r'(from|offset|to|using)%s' % _break, Keyword.Reserved, 'label'),
(r'is%s' % _break, Keyword.Reserved,
('descriptor/convert-dots', 'var')),
(r'(locals|stack)%s' % _break, Keyword.Reserved, 'verification'),
(r'method%s' % _break, Keyword.Reserved, 'enclosing-method'),
# Instructions
(words((
'aaload', 'aastore', 'aconst_null', 'aload', 'aload_0', 'aload_1', 'aload_2',
'aload_3', 'aload_w', 'areturn', 'arraylength', 'astore', 'astore_0', 'astore_1',
'astore_2', 'astore_3', 'astore_w', 'athrow', 'baload', 'bastore', 'bipush',
'breakpoint', 'caload', 'castore', 'd2f', 'd2i', 'd2l', 'dadd', 'daload', 'dastore',
'dcmpg', 'dcmpl', 'dconst_0', 'dconst_1', 'ddiv', 'dload', 'dload_0', 'dload_1',
'dload_2', 'dload_3', 'dload_w', 'dmul', 'dneg', 'drem', 'dreturn', 'dstore', 'dstore_0',
'dstore_1', 'dstore_2', 'dstore_3', 'dstore_w', 'dsub', 'dup', 'dup2', 'dup2_x1',
'dup2_x2', 'dup_x1', 'dup_x2', 'f2d', 'f2i', 'f2l', 'fadd', 'faload', 'fastore', 'fcmpg',
'fcmpl', 'fconst_0', 'fconst_1', 'fconst_2', 'fdiv', 'fload', 'fload_0', 'fload_1',
'fload_2', 'fload_3', 'fload_w', 'fmul', 'fneg', 'frem', 'freturn', 'fstore', 'fstore_0',
'fstore_1', 'fstore_2', 'fstore_3', 'fstore_w', 'fsub', 'i2b', 'i2c', 'i2d', 'i2f', 'i2l',
'i2s', 'iadd', 'iaload', 'iand', 'iastore', 'iconst_0', 'iconst_1', 'iconst_2',
'iconst_3', 'iconst_4', 'iconst_5', 'iconst_m1', 'idiv', 'iinc', 'iinc_w', 'iload',
'iload_0', 'iload_1', 'iload_2', 'iload_3', 'iload_w', 'imul', 'ineg', 'int2byte',
'int2char', 'int2short', 'ior', 'irem', 'ireturn', 'ishl', 'ishr', 'istore', 'istore_0',
'istore_1', 'istore_2', 'istore_3', 'istore_w', 'isub', 'iushr', 'ixor', 'l2d', 'l2f',
'l2i', 'ladd', 'laload', 'land', 'lastore', 'lcmp', 'lconst_0', 'lconst_1', 'ldc2_w',
'ldiv', 'lload', 'lload_0', 'lload_1', 'lload_2', 'lload_3', 'lload_w', 'lmul', 'lneg',
'lookupswitch', 'lor', 'lrem', 'lreturn', 'lshl', 'lshr', 'lstore', 'lstore_0',
'lstore_1', 'lstore_2', 'lstore_3', 'lstore_w', 'lsub', 'lushr', 'lxor',
'monitorenter', 'monitorexit', 'nop', 'pop', 'pop2', 'ret', 'ret_w', 'return', 'saload',
'sastore', 'sipush', 'swap'), suffix=_break), Keyword.Reserved),
(r'(anewarray|checkcast|instanceof|ldc|ldc_w|new)%s' % _break,
Keyword.Reserved, 'class/no-dots'),
(r'invoke(dynamic|interface|nonvirtual|special|'
r'static|virtual)%s' % _break, Keyword.Reserved,
'invocation'),
(r'(getfield|putfield)%s' % _break, Keyword.Reserved,
('descriptor/no-dots', 'field')),
(r'(getstatic|putstatic)%s' % _break, Keyword.Reserved,
('descriptor/no-dots', 'static')),
(words((
'goto', 'goto_w', 'if_acmpeq', 'if_acmpne', 'if_icmpeq',
'if_icmpge', 'if_icmpgt', 'if_icmple', 'if_icmplt', 'if_icmpne',
'ifeq', 'ifge', 'ifgt', 'ifle', 'iflt', 'ifne', 'ifnonnull',
'ifnull', 'jsr', 'jsr_w'), suffix=_break),
Keyword.Reserved, 'label'),
(r'(multianewarray|newarray)%s' % _break, Keyword.Reserved,
'descriptor/convert-dots'),
(r'tableswitch%s' % _break, Keyword.Reserved, 'table')
],
'quote': [
(r"'", String.Single, '#pop'),
(r'\\u[\da-fA-F]{4}', String.Escape),
(r"[^'\\]+", String.Single)
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\([nrtfb"\'\\]|u[\da-fA-F]{4}|[0-3]?[0-7]{1,2})',
String.Escape),
(r'[^"\\]+', String.Double)
],
'root': [
(r'\n+', Text),
(r"'", String.Single, 'quote'),
include('default'),
(r'(%s)([ \t\r]*)(:)' % _name,
bygroups(Name.Label, Text, Punctuation)),
(_name, String.Other)
],
'annotation': [
(r'\n', Text, ('#pop', 'annotation-body')),
(r'default%s' % _break, Keyword.Reserved,
('#pop', 'annotation-default')),
include('default')
],
'annotation-body': [
(r'\n+', Text),
(r'\.end%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
(_name, String.Other, ('annotation-items', 'descriptor/no-dots'))
],
'annotation-default': [
(r'\n+', Text),
(r'\.end%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
default(('annotation-items', 'descriptor/no-dots'))
],
'annotation-items': [
(r"'", String.Single, 'quote'),
include('default'),
(_name, String.Other)
],
'caught-exception': [
(r'all%s' % _break, Keyword, '#pop'),
include('exception')
],
'class/convert-dots': [
include('default'),
(r'(L)((?:%s[/.])*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Class), '#pop')
],
'class/no-dots': [
include('default'),
(r'\[+', Punctuation, ('#pop', 'descriptor/no-dots')),
(r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'((?:%s/)*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Class), '#pop')
],
'descriptor/convert-dots': [
include('default'),
(r'\[+', Punctuation),
(r'(L)((?:%s[/.])*)(%s?)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
default('#pop')
],
'descriptor/no-dots': [
include('default'),
(r'\[+', Punctuation),
(r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
default('#pop')
],
'descriptors/convert-dots': [
(r'\)', Punctuation, '#pop'),
default('descriptor/convert-dots')
],
'enclosing-method': [
(_ws, Text),
(r'(?=[^%s]*\()' % _separator, Text, ('#pop', 'invocation')),
default(('#pop', 'class/convert-dots'))
],
'exception': [
include('default'),
(r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Exception), '#pop')
],
'field': [
(r'static%s' % _break, Keyword.Reserved, ('#pop', 'static')),
include('default'),
(r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Variable.Instance),
'#pop')
],
'invocation': [
include('default'),
(r'((?:%s[/.](?=[^%s(]*[/.]))*)(%s[/.])?(%s)(\()' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Function, Punctuation),
('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
'descriptor/convert-dots'))
],
'label': [
include('default'),
(_name, Name.Label, '#pop')
],
'method': [
include('default'),
(r'(%s)(\()' % _name, bygroups(Name.Function, Punctuation),
('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
'descriptor/convert-dots'))
],
'no-verification': [
(r'(locals|method|stack)%s' % _break, Keyword.Reserved, '#pop'),
include('default')
],
'static': [
include('default'),
(r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Variable.Class), '#pop')
],
'table': [
(r'\n+', Text),
(r'default%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
(_name, Name.Label)
],
'var': [
include('default'),
(_name, Name.Variable, '#pop')
],
'verification': [
include('default'),
(r'(Double|Float|Integer|Long|Null|Top|UninitializedThis)%s' %
_break, Keyword, '#pop'),
(r'Object%s' % _break, Keyword, ('#pop', 'class/no-dots')),
(r'Uninitialized%s' % _break, Keyword, ('#pop', 'label'))
]
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'^\s*[a-z]+_[a-z]+\b', text, re.MULTILINE):
score += 0.3
if re.search(r'^\s*\.(attribute|bytecode|debug|deprecated|enclosing|'
r'inner|interface|limit|set|signature|stack)\b', text,
re.MULTILINE):
score += 0.6
return min(score, 1.0)
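# Illustrative note, not part of the original lexer: for a typical Jasmin file
# containing a ".class" directive, underscore-separated mnemonics such as
# "if_icmpeq", and a ".limit" directive, the heuristic above accumulates
# 0.5 + 0.3 + 0.6 = 1.4, which min() then caps at 1.0.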
class SarlLexer(RegexLexer):
"""
For `SARL <http://www.sarl.io>`_ source code.
.. versionadded:: 2.4
"""
name = 'SARL'
aliases = ['sarl']
filenames = ['*.sarl']
mimetypes = ['text/x-sarl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(as|break|case|catch|default|do|else|extends|extension|finally|'
r'fires|for|if|implements|instanceof|new|on|requires|return|super|'
r'switch|throw|throws|try|typeof|uses|while|with)\b',
Keyword),
(r'(abstract|def|dispatch|final|native|override|private|protected|'
r'public|static|strictfp|synchronized|transient|val|var|volatile)\b',
Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(false|it|null|occurrence|this|true|void)\b', Keyword.Constant),
(r'(agent|annotation|artifact|behavior|capacity|class|enum|event|'
r'interface|skill|space)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/pygments/lexers/jvm.py
|
Python
|
apache-2.0
| 71,561
|
# Copyright 2013 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from unittest import mock
from neutronclient.neutron.v2_0.lb import healthmonitor
from neutronclient.tests.unit import test_cli20
class CLITestV20LbHealthmonitorJSON(test_cli20.CLITestV20Base):
def test_create_healthmonitor_with_mandatory_params(self):
# lb-healthmonitor-create with mandatory params only.
resource = 'health_monitor'
cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
admin_state_up = False
delay = '60'
max_retries = '2'
timeout = '10'
type = 'TCP'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = ['--admin-state-down',
'--delay', delay,
'--max-retries', max_retries,
'--timeout', timeout,
'--type', type,
'--tenant-id', tenant_id]
position_names = ['admin_state_up', 'delay', 'max_retries', 'timeout',
'type', 'tenant_id']
position_values = [admin_state_up, delay, max_retries, timeout, type,
tenant_id]
self._test_create_resource(resource, cmd, '', my_id, args,
position_names, position_values)
def test_create_healthmonitor_with_all_params(self):
# lb-healthmonitor-create with all params set.
resource = 'health_monitor'
cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
admin_state_up = False
delay = '60'
expected_codes = '200-202,204'
http_method = 'HEAD'
max_retries = '2'
timeout = '10'
type = 'TCP'
tenant_id = 'my-tenant'
url_path = '/health'
my_id = 'my-id'
args = ['--admin-state-down',
'--delay', delay,
'--expected-codes', expected_codes,
'--http-method', http_method,
'--max-retries', max_retries,
'--timeout', timeout,
'--type', type,
'--tenant-id', tenant_id,
'--url-path', url_path]
position_names = ['admin_state_up', 'delay',
'expected_codes', 'http_method',
'max_retries', 'timeout',
'type', 'tenant_id', 'url_path']
position_values = [admin_state_up, delay,
expected_codes, http_method,
max_retries, timeout,
type, tenant_id, url_path]
self._test_create_resource(resource, cmd, '', my_id, args,
position_names, position_values)
def test_list_healthmonitors(self):
# lb-healthmonitor-list.
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True)
def test_list_healthmonitors_pagination(self):
# lb-healthmonitor-list.
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_healthmonitors_sort(self):
        # lb-healthmonitor-list --sort-key name --sort-key id --sort-dir asc
        # --sort-dir desc
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_healthmonitors_limit(self):
# lb-healthmonitor-list -P.
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_healthmonitor_id(self):
# lb-healthmonitor-show test_id.
resource = 'health_monitor'
cmd = healthmonitor.ShowHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_update_health_monitor(self):
        # lb-healthmonitor-update myid --timeout 5.
resource = 'health_monitor'
cmd = healthmonitor.UpdateHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--timeout', '5'],
{'timeout': '5', })
def test_delete_healthmonitor(self):
        # lb-healthmonitor-delete my-id.
resource = 'health_monitor'
cmd = healthmonitor.DeleteHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_associate_healthmonitor(self):
cmd = healthmonitor.AssociateHealthMonitor(
test_cli20.MyApp(sys.stdout),
None)
resource = 'health_monitor'
health_monitor_id = 'hm-id'
pool_id = 'p_id'
args = [health_monitor_id, pool_id]
body = {resource: {'id': health_monitor_id}}
result = {resource: {'id': health_monitor_id}, }
result_str = self.client.serialize(result)
path = getattr(self.client,
"associate_pool_health_monitors_path") % pool_id
return_tup = (test_cli20.MyResp(200), result_str)
cmd_parser = cmd.get_parser('test_' + resource)
parsed_args = cmd_parser.parse_args(args)
with mock.patch.object(cmd, "get_client",
return_value=self.client) as mock_get_client, \
mock.patch.object(self.client.httpclient, "request",
return_value=return_tup) as mock_request:
cmd.run(parsed_args)
mock_get_client.assert_called_once_with()
mock_request.assert_called_once_with(
test_cli20.end_url(path), 'POST',
body=test_cli20.MyComparator(body, self.client),
headers=test_cli20.ContainsKeyValue(
{'X-Auth-Token': test_cli20.TOKEN}))
def test_disassociate_healthmonitor(self):
cmd = healthmonitor.DisassociateHealthMonitor(
test_cli20.MyApp(sys.stdout),
None)
resource = 'health_monitor'
health_monitor_id = 'hm-id'
pool_id = 'p_id'
args = [health_monitor_id, pool_id]
path = (getattr(self.client,
"disassociate_pool_health_monitors_path") %
{'pool': pool_id, 'health_monitor': health_monitor_id})
return_tup = (test_cli20.MyResp(204), None)
cmd_parser = cmd.get_parser('test_' + resource)
parsed_args = cmd_parser.parse_args(args)
with mock.patch.object(cmd, "get_client",
return_value=self.client) as mock_get_client, \
mock.patch.object(self.client.httpclient, "request",
return_value=return_tup) as mock_request:
cmd.run(parsed_args)
mock_get_client.assert_called_once_with()
mock_request.assert_called_once_with(
test_cli20.end_url(path), 'DELETE',
body=None,
headers=test_cli20.ContainsKeyValue(
{'X-Auth-Token': test_cli20.TOKEN}))
|
openstack/python-neutronclient
|
neutronclient/tests/unit/lb/test_cli20_healthmonitor.py
|
Python
|
apache-2.0
| 8,574
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
import datetime # for checking renewal date range
class RenewBookForm(forms.Form):
renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3). ")
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
# check date is not in past
if data < datetime.date.today():
raise ValidationError(_('Invalid date - renewal in past'))
        # check date is in range librarian allowed to change (+4 weeks)
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(_('Invalid date - renewal more than 4 weeks ahead'))
# Returning the cleaned data
return data
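# Illustrative usage sketch, not part of the original module; the view flow and
# the model instance / field names below are assumptions:
#     form = RenewBookForm(request.POST)
#     if form.is_valid():
#         book_instance.due_back = form.cleaned_data['renewal_date']
#         book_instance.save()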
|
PatrickCmd/django_local_library
|
catalog/forms.py
|
Python
|
apache-2.0
| 776
|
from pyramid.view import view_config
from twonicornweb.views import (
site_layout,
get_user,
)
@view_config(context=Exception, renderer='twonicornweb:templates/exception.pt')
def error(exc, request):
request.response.status_int = 500
page_title = 'Internal Server Error'
user = get_user(request)
return {'layout': site_layout(),
'page_title': page_title,
'user': user,
'error': exc.message
}
|
CityGrid/twonicorn
|
twonicornweb/views/exception.py
|
Python
|
apache-2.0
| 471
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Tool to gauge message passing throughput and latencies"""
import logging
import optparse
import time
import uuid
import pyngus
from proton import Message
from utils import connect_socket
from utils import get_host_port
from utils import process_connection
LOG = logging.getLogger()
LOG.addHandler(logging.StreamHandler())
class ConnectionEventHandler(pyngus.ConnectionEventHandler):
def __init__(self):
super(ConnectionEventHandler, self).__init__()
def connection_failed(self, connection, error):
"""Connection has failed in some way."""
LOG.warn("Connection failed callback: %s", error)
def connection_remote_closed(self, connection, pn_condition):
"""Peer has closed its end of the connection."""
LOG.debug("connection_remote_closed condition=%s", pn_condition)
connection.close()
class SenderHandler(pyngus.SenderEventHandler):
def __init__(self, count):
self._count = count
self._msg = Message()
self.calls = 0
self.total_ack_latency = 0.0
self.stop_time = None
self.start_time = None
def credit_granted(self, sender_link):
if self.start_time is None:
self.start_time = time.time()
self._send_message(sender_link)
def _send_message(self, link):
now = time.time()
self._msg.body = {'tx-timestamp': now}
self._last_send = now
link.send(self._msg, self)
def __call__(self, link, handle, status, error):
now = time.time()
self.total_ack_latency += now - self._last_send
self.calls += 1
if self._count:
self._count -= 1
if self._count == 0:
self.stop_time = now
link.close()
return
self._send_message(link)
def sender_remote_closed(self, sender_link, pn_condition):
LOG.debug("Sender peer_closed condition=%s", pn_condition)
sender_link.close()
def sender_failed(self, sender_link, error):
"""Protocol error occurred."""
LOG.debug("Sender failed error=%s", error)
sender_link.close()
class ReceiverHandler(pyngus.ReceiverEventHandler):
def __init__(self, count, capacity):
self._count = count
self._capacity = capacity
self._msg = Message()
self.receives = 0
self.tx_total_latency = 0.0
def receiver_active(self, receiver_link):
receiver_link.add_capacity(self._capacity)
def receiver_remote_closed(self, receiver_link, pn_condition):
"""Peer has closed its end of the link."""
LOG.debug("receiver_remote_closed condition=%s", pn_condition)
receiver_link.close()
def receiver_failed(self, receiver_link, error):
"""Protocol error occurred."""
LOG.warn("receiver_failed error=%s", error)
receiver_link.close()
def message_received(self, receiver, message, handle):
now = time.time()
receiver.message_accepted(handle)
self.tx_total_latency += now - message.body['tx-timestamp']
self.receives += 1
if self._count:
self._count -= 1
if self._count == 0:
receiver.close()
return
lc = receiver.capacity
cap = self._capacity
if lc < (cap / 2):
receiver.add_capacity(cap - lc)
def main(argv=None):
_usage = """Usage: %prog [options]"""
parser = optparse.OptionParser(usage=_usage)
parser.add_option("-a", dest="server", type="string",
default="amqp://0.0.0.0:5672",
help="The address of the server [amqp://0.0.0.0:5672]")
parser.add_option("--node", type='string', default='amq.topic',
help='Name of source/target node')
parser.add_option("--count", type='int', default=100,
help='Send N messages (send forever if N==0)')
parser.add_option("--debug", dest="debug", action="store_true",
help="enable debug logging")
parser.add_option("--trace", dest="trace", action="store_true",
help="enable protocol tracing")
opts, _ = parser.parse_args(args=argv)
if opts.debug:
LOG.setLevel(logging.DEBUG)
host, port = get_host_port(opts.server)
my_socket = connect_socket(host, port)
# create AMQP Container, Connection, and SenderLink
#
container = pyngus.Container(uuid.uuid4().hex)
conn_properties = {'hostname': host,
'x-server': False}
if opts.trace:
conn_properties["x-trace-protocol"] = True
c_handler = ConnectionEventHandler()
connection = container.create_connection("perf_tool",
c_handler,
conn_properties)
r_handler = ReceiverHandler(opts.count, opts.count or 1000)
receiver = connection.create_receiver(opts.node, opts.node, r_handler)
s_handler = SenderHandler(opts.count)
sender = connection.create_sender(opts.node, opts.node, s_handler)
connection.open()
receiver.open()
while not receiver.active:
process_connection(connection, my_socket)
sender.open()
    # Run until all messages transferred
while not sender.closed or not receiver.closed:
process_connection(connection, my_socket)
connection.close()
while not connection.closed:
process_connection(connection, my_socket)
duration = s_handler.stop_time - s_handler.start_time
thru = s_handler.calls / duration
permsg = duration / s_handler.calls
ack = s_handler.total_ack_latency / s_handler.calls
lat = r_handler.tx_total_latency / r_handler.receives
print("Stats:\n"
" TX Avg Calls/Sec: %f Per Call: %f Ack Latency %f\n"
" RX Latency: %f" % (thru, permsg, ack, lat))
sender.destroy()
receiver.destroy()
connection.destroy()
container.destroy()
my_socket.close()
return 0
if __name__ == "__main__":
main()
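# Example invocation (illustrative only; the flags are the ones defined in
# main() above):
#     python perf-tool.py -a amqp://127.0.0.1:5672 --node amq.topic --count 1000 --debug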
|
kgiusti/pyngus
|
examples/perf-tool.py
|
Python
|
apache-2.0
| 6,874
|
import logging
import pytest
import sdk_cmd
import sdk_install
import sdk_plan
import sdk_security
import sdk_utils
from tests import config
from tests import test_utils
LOG = logging.getLogger(__name__)
@pytest.fixture(scope="module", autouse=True)
def zookeeper_server(configure_security):
service_options = {
"service": {"name": config.ZOOKEEPER_SERVICE_NAME, "virtual_network_enabled": True}
}
zk_account = "test-zookeeper-service-account"
zk_secret = "test-zookeeper-secret"
try:
sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
if sdk_utils.is_strict_mode():
service_options = sdk_utils.merge_dictionaries(
{"service": {"service_account": zk_account, "service_account_secret": zk_secret}},
service_options,
)
sdk_security.setup_security(
config.ZOOKEEPER_SERVICE_NAME,
service_account=zk_account,
service_account_secret=zk_secret,
)
sdk_install.install(
config.ZOOKEEPER_PACKAGE_NAME,
config.ZOOKEEPER_SERVICE_NAME,
config.ZOOKEEPER_TASK_COUNT,
package_version=config.ZOOKEEPER_PACKAGE_VERSION,
additional_options=service_options,
timeout_seconds=30 * 60,
insert_strict_options=False,
)
yield {**service_options, **{"package_name": config.ZOOKEEPER_PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
if sdk_utils.is_strict_mode():
sdk_security.delete_service_account(
service_account_name=zk_account, service_account_secret=zk_secret
)
@pytest.fixture(scope="module", autouse=True)
def kafka_server(zookeeper_server):
try:
# Get the zookeeper DNS values
zookeeper_dns = sdk_cmd.svc_cli(
zookeeper_server["package_name"],
zookeeper_server["service"]["name"],
"endpoint clientport",
parse_json=True,
)[1]["dns"]
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
config.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options={"kafka": {"kafka_zookeeper_uri": ",".join(zookeeper_dns)}},
)
# wait for brokers to finish registering before starting tests
test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)
yield {"package_name": config.PACKAGE_NAME, "service": {"name": config.SERVICE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def topic_create(kafka_server: dict):
return test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME, kafka_server["service"]["name"])
def fetch_topic(kafka_server: dict):
_, topic_list, _ = sdk_cmd.svc_cli(
config.PACKAGE_NAME, kafka_server["service"]["name"], "topic list", parse_json=True
)
return topic_list
def restart_zookeeper_node(id: int, kafka_server: dict):
sdk_cmd.svc_cli(
config.ZOOKEEPER_PACKAGE_NAME,
config.ZOOKEEPER_SERVICE_NAME,
"pod restart zookeeper-{}".format(id),
)
sdk_plan.wait_for_completed_recovery(config.ZOOKEEPER_SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.zookeeper
def test_check_topic_list_on_zk_restart(kafka_server: dict):
topic_create(kafka_server)
topic_list_before = fetch_topic(kafka_server)
for id in range(0, int(config.ZOOKEEPER_TASK_COUNT / 2)):
restart_zookeeper_node(id, kafka_server)
topic_list_after = fetch_topic(kafka_server)
assert topic_list_before == topic_list_after
|
mesosphere/dcos-kafka-service
|
frameworks/kafka/tests/test_check_topic_list_on_zk_restart.py
|
Python
|
apache-2.0
| 3,812
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements additional custom Pylint checkers to be used as part of
presubmit checks. Next message id would be C0029.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import linecache
import os
import re
import sys
import tokenize
import python_utils
from .. import docstrings_checker
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.9.4')
sys.path.insert(0, _PYLINT_PATH)
# List of punctuation symbols that can be used at the end of
# comments and docstrings.
ALLOWED_TERMINATING_PUNCTUATIONS = ['.', '?', '}', ']', ')']
# If any of these phrases are found inside a docstring or comment,
# the punctuation and capital letter checks will be skipped for that
# comment or docstring.
EXCLUDED_PHRASES = [
'coding:', 'pylint:', 'http://', 'https://', 'scripts/', 'extract_node']
import astroid # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint import checkers # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint import interfaces # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint.checkers import typecheck # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint.checkers import utils as checker_utils # isort:skip pylint: disable=wrong-import-order, wrong-import-position
from pylint.extensions import _check_docs_utils # isort:skip pylint: disable=wrong-import-order, wrong-import-position
def read_from_node(node):
"""Returns the data read from the ast node in unicode form.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
Returns:
list(str). The data read from the ast node.
"""
return list([line.decode('utf-8') for line in node.stream().readlines()])
class ExplicitKeywordArgsChecker(checkers.BaseChecker):
"""Custom pylint checker which checks for explicit keyword arguments
in any function call.
"""
__implements__ = interfaces.IAstroidChecker
name = 'explicit-keyword-args'
priority = -1
msgs = {
'C0001': (
'Keyword argument %s should be named explicitly in %s call of %s.',
'non-explicit-keyword-args',
'All keyword arguments should be explicitly named in function call.'
),
'C0027': (
'Keyword argument %s used for a non keyword argument in %s '
'call of %s.',
'arg-name-for-non-keyword-arg',
'Position arguments should not be used as keyword arguments '
'in function call.'
),
}
def _check_non_explicit_keyword_args(
self, node, name, callable_name, keyword_args,
num_positional_args_unused, num_mandatory_parameters):
"""Custom pylint check to ensure that position arguments should not
be used as keyword arguments.
Args:
node: astroid.node.Function. The current function call node.
name: str. Name of the keyword argument.
callable_name: str. Name of method type.
keyword_args: list(str). Name of all keyword arguments in function
call.
num_positional_args_unused: int. Number of unused positional
arguments.
num_mandatory_parameters: int. Number of mandatory parameters.
Returns:
int. Number of unused positional arguments.
"""
display_name = repr(name)
if name not in keyword_args and (
num_positional_args_unused > (
num_mandatory_parameters)) and (
callable_name != 'constructor'):
# This try/except block tries to get the function
# name. Since each node may differ, multiple
# blocks have been used.
try:
func_name = node.func.attrname
except AttributeError:
func_name = node.func.name
self.add_message(
'non-explicit-keyword-args', node=node,
args=(
display_name,
callable_name,
func_name))
num_positional_args_unused -= 1
return num_positional_args_unused
def _check_argname_for_nonkeyword_arg(
self, node, called, callable_name, keyword_args,
keyword_args_in_funcdef):
"""Custom pylint check to ensure that position arguments should not
be used as keyword arguments.
Args:
node: astroid.node.Function. The current function call node.
called: astroid.Call. The function call object.
keyword_args: list(str). Name of all keyword arguments in function
call.
callable_name: str. Name of method type.
keyword_args_in_funcdef: list(str). Name of all keyword arguments in
function definition.
"""
for arg in keyword_args:
# TODO(#10038): Fix the check to cover below case as well.
# If there is *args and **kwargs in the function definition skip the
            # check because we can use keyword arguments in function call even
# if **kwargs is present in the function definition. See Example:
# Function def -> def func(entity_id, *args, **kwargs):
# Function call -> func(entity_id='1', a=1, b=2, c=3)
# By parsing calling method we get
# keyword_arguments = entity_id, a, b, c.
# From the function definition, we will get keyword_arguments = []
# Now we do not have a way to identify which one is a keyword
# argument and which one is not.
if not called.args.kwarg and callable_name != 'constructor':
if not arg in keyword_args_in_funcdef:
# This try/except block tries to get the function
# name.
try:
func_name = node.func.attrname
except AttributeError:
func_name = node.func.name
self.add_message(
'arg-name-for-non-keyword-arg', node=node,
args=(repr(arg), callable_name, func_name))
def visit_call(self, node):
"""Visits each function call in a lint check.
Args:
node: Call. The current function call node.
"""
called = checker_utils.safe_infer(node.func)
try:
# For the rationale behind the Pylint pragma below,
# see https://stackoverflow.com/a/35701863/8115428
called, implicit_args, callable_name = (
typecheck._determine_callable(called)) # pylint: disable=protected-access
except ValueError:
return
if called.args.args is None:
# Built-in functions have no argument information.
return
if len(called.argnames()) != len(set(called.argnames())):
return
# Build the set of keyword arguments and count the positional arguments.
call_site = astroid.arguments.CallSite.from_call(node)
num_positional_args = len(call_site.positional_arguments)
keyword_args = list(call_site.keyword_arguments.keys())
already_filled_positionals = getattr(called, 'filled_positionals', 0)
already_filled_keywords = getattr(called, 'filled_keywords', {})
keyword_args += list(already_filled_keywords)
num_positional_args += already_filled_positionals
num_positional_args += implicit_args
# Analyze the list of formal parameters.
num_mandatory_parameters = len(called.args.args) - len(
called.args.defaults)
parameters = []
parameter_name_to_index = {}
for i, arg in enumerate(called.args.args):
if isinstance(arg, astroid.Tuple):
name = None
else:
assert isinstance(arg, astroid.AssignName)
name = arg.name
parameter_name_to_index[name] = i
if i >= num_mandatory_parameters:
defval = called.args.defaults[i - num_mandatory_parameters]
else:
defval = None
parameters.append([(name, defval), False])
num_positional_args_unused = num_positional_args
# The list below will store all the keyword arguments present in the
# function definition.
keyword_args_in_funcdef = []
# Check that all parameters with a default value have
# been called explicitly.
for [(name, defval), _] in parameters:
if defval:
keyword_args_in_funcdef.append(name)
num_positional_args_unused = (
self._check_non_explicit_keyword_args(
node, name, callable_name, keyword_args,
num_positional_args_unused, num_mandatory_parameters))
self._check_argname_for_nonkeyword_arg(
node, called, callable_name, keyword_args, keyword_args_in_funcdef)
class HangingIndentChecker(checkers.BaseChecker):
"""Custom pylint checker which checks for break after parenthesis in case
of hanging indentation.
"""
__implements__ = interfaces.ITokenChecker
name = 'hanging-indent'
priority = -1
msgs = {
'C0002': (
(
'There should be a break after parenthesis when content within '
'parenthesis spans multiple lines.'),
'no-break-after-hanging-indent',
(
'If something within parenthesis extends along multiple lines, '
'break after opening parenthesis.')
),
}
def process_tokens(self, tokens):
"""Process tokens to check if there is a line break after the bracket.
Args:
tokens: astroid.Tokens. Object to process tokens.
"""
escape_character_indicator = b'\\'
string_indicator = b'\''
excluded = False
for (token_type, token, (line_num, _), _, line) in tokens:
# Check if token type is an operator and is either a
# left parenthesis '(' or a right parenthesis ')'.
if token_type == tokenize.OP and (
token == b'(' or token == b')'):
line = line.strip()
# Exclude 'if', 'elif', 'while' statements.
if line.startswith((b'if ', b'while ', b'elif ')):
excluded = True
# Skip check if there is a comment at the end of line.
if excluded:
split_line = line.split()
if '#' in split_line:
comment_index = split_line.index('#')
if split_line[comment_index - 1].endswith(b'):'):
excluded = False
elif line.endswith(b'):'):
excluded = False
if excluded:
continue
bracket_count = 0
line_length = len(line)
escape_character_found = False
in_string = False
for char_num in python_utils.RANGE(line_length):
char = line[char_num]
if in_string and (
char == escape_character_indicator or
escape_character_found):
escape_character_found = not escape_character_found
continue
# Check if we found the string indicator and flip the
# in_string boolean.
if char == string_indicator:
in_string = not in_string
# Ignore anything inside a string.
if in_string:
continue
if char == b'(':
if bracket_count == 0:
position = char_num
bracket_count += 1
elif char == b')' and bracket_count > 0:
bracket_count -= 1
if bracket_count > 0 and position + 1 < line_length:
# Allow the use of '[', ']', '{', '}' after the parenthesis.
separators = set('[{( ')
if line[line_length - 1] in separators:
continue
content = line[position + 1:]
# Skip check if there is nothing after the bracket.
split_content = content.split()
# Skip check if there is a comment at the end of line.
if '#' in split_content:
comment_index = split_content.index('#')
if comment_index == 0:
continue
else:
if split_content[comment_index - 1].endswith(b'('):
continue
self.add_message(
'no-break-after-hanging-indent', line=line_num)
# The following class was derived from
# https://github.com/PyCQA/pylint/blob/377cc42f9e3116ff97cddd4567d53e9a3e24ebf9/pylint/extensions/docparams.py#L26
class DocstringParameterChecker(checkers.BaseChecker):
"""Checker for Sphinx, Google, or Numpy style docstrings
* Check that all function, method and constructor parameters are mentioned
in the params and types part of the docstring. Constructor parameters
can be documented in either the class docstring or ``__init__`` docstring,
but not both.
* Check that there are no naming inconsistencies between the signature and
the documentation, i.e. also report documented parameters that are missing
in the signature. This is important to find cases where parameters are
renamed only in the code, not in the documentation.
* Check that all explicitly raised exceptions in a function are documented
in the function docstring. Caught exceptions are ignored.
Args:
linter: Pylinter. The linter object.
"""
__implements__ = interfaces.IAstroidChecker
name = 'parameter_documentation'
msgs = {
'W9005': (
'"%s" has constructor parameters '
'documented in class and __init__',
'multiple-constructor-doc',
'Please remove parameter declarations '
'in the class or constructor.'),
'W9006': (
'"%s" not documented as being raised',
'missing-raises-doc',
'Please document exceptions for '
'all raised exception types.'),
'W9008': (
'Redundant returns documentation',
'redundant-returns-doc',
'Please remove the return/rtype '
'documentation from this method.'),
'W9010': (
'Redundant yields documentation',
'redundant-yields-doc',
'Please remove the yields documentation from this method.'),
'W9011': (
'Missing return documentation',
'missing-return-doc',
'Please add documentation about what this method returns.',
{'old_names': [('W9007', 'missing-returns-doc')]}),
'W9012': (
'Missing return type documentation',
'missing-return-type-doc',
'Please document the type returned by this method.',
# We can't use the same old_name for two different warnings
# {'old_names': [('W9007', 'missing-returns-doc')]}.
),
'W9013': (
'Missing yield documentation',
'missing-yield-doc',
'Please add documentation about what this generator yields.',
{'old_names': [('W9009', 'missing-yields-doc')]}),
'W9014': (
'Missing yield type documentation',
'missing-yield-type-doc',
'Please document the type yielded by this method.',
# We can't use the same old_name for two different warnings
# {'old_names': [('W9009', 'missing-yields-doc')]}.
),
'W9015': (
'"%s" missing in parameter documentation',
'missing-param-doc',
'Please add parameter declarations for all parameters.',
{'old_names': [('W9003', 'missing-param-doc')]}),
'W9016': (
'"%s" missing in parameter type documentation',
'missing-type-doc',
'Please add parameter type declarations for all parameters.',
{'old_names': [('W9004', 'missing-type-doc')]}),
'W9017': (
'"%s" differing in parameter documentation',
'differing-param-doc',
'Please check parameter names in declarations.',
),
'W9018': (
'"%s" differing in parameter type documentation',
'differing-type-doc',
'Please check parameter names in type declarations.',
),
'W9019': (
'Line starting with "%s" requires 4 space indentation relative to'
' args line indentation',
'4-space-indentation-for-arg-parameters-doc',
'Please use 4 space indentation in parameter definitions relative'
' to the args line indentation.'
),
'W9020': (
'Line starting with "%s" requires 8 space indentation relative to'
' args line indentation',
'8-space-indentation-for-arg-in-descriptions-doc',
'Please indent wrap-around descriptions by 8 relative to the args'
' line indentation.'
),
'W9021': (
'Args: indentation is incorrect, must be at the outermost'
' indentation level.',
'incorrect-indentation-for-arg-header-doc',
'Please indent args line to the outermost indentation level.'
),
'W9022': (
'4 space indentation in docstring.',
'4-space-indentation-in-docstring',
'Please use 4 space indentation for parameters relative to section'
' headers.'
),
'W9023': (
'8 space indentation in docstring.',
'8-space-indentation-in-docstring',
'Please use 8 space indentation in wrap around messages'
' relative to section headers.'
),
'W9024': (
'Raises section should be the following form: Exception_name. '
'Description.',
'malformed-raises-section',
'The parameter is incorrectly formatted.'
),
'W9025': (
'Period is not used at the end of the docstring.',
'no-period-used',
            'Please use a period at the end of the docstring.'
),
'W9026': (
'Multiline docstring should end with a new line.',
'no-newline-used-at-end',
'Please end multiline docstring with a new line.'
),
'W9027': (
'Single line docstring should not span two lines.',
'single-line-docstring-span-two-lines',
'Please do not use two lines for a single line docstring. '
'If line length exceeds 80 characters, '
'convert the single line docstring to a multiline docstring.'
),
'W9028': (
'Empty line before the end of multi-line docstring.',
'empty-line-before-end',
'Please do not use empty line before '
'the end of the multi-line docstring.'
),
'W9029': (
'Space after """ in docstring.',
'space-after-triple-quote',
'Please do not use space after """ in docstring.'
),
'W9030': (
'Missing single newline below class docstring.',
'newline-below-class-docstring',
'Please add a single newline below class docstring.'
),
'W9031': (
'Files must have a single newline above args in doc string.',
'single-space-above-args',
'Please enter a single newline above args in doc string.'
),
'W9032': (
'Files must have a single newline above returns in doc string.',
'single-space-above-returns',
'Please enter a single newline above returns in doc string.'
),
'W9033': (
'Files must have a single newline above raises in doc string.',
'single-space-above-raises',
'Please enter a single newline above raises in doc string.'
),
'W9034': (
'Files must have a single newline above yield in doc string.',
'single-space-above-yield',
'Please enter a single newline above yield in doc string.'
),
'W9035': (
'Arguments should be in following form: variable_name: typeinfo. '
'Description.',
'malformed-args-section',
'The parameter is incorrectly formatted.'
),
'W9036': (
'Returns should be in the following form: typeinfo. Description.',
'malformed-returns-section',
'The parameter is incorrectly formatted.'
),
'W9037': (
'Yields should be in the following form: typeinfo. Description.',
'malformed-yields-section',
'The parameter is incorrectly formatted.'
),
'W9038': (
'Arguments starting with *args should be formatted in the following'
' form: *args: list(*). Description.',
'malformed-args-argument',
'The parameter is incorrectly formatted.'
)
}
options = (
(
'accept-no-param-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing parameter '
'documentation in the docstring of a '
'function that has parameters.'
}),
(
'accept-no-raise-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing raises '
'documentation in the docstring of a function that '
'raises an exception.'
}),
(
'accept-no-return-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing return '
'documentation in the docstring of a function that '
'returns a statement.'
}),
(
'accept-no-yields-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing yields '
'documentation in the docstring of a generator.'
}),
)
priority = -2
constructor_names = {'__init__', '__new__'}
not_needed_param_in_docstring = {'self', 'cls'}
docstring_sections = {'Raises:', 'Returns:', 'Yields:'}
# Docstring section headers split up into arguments, returns, yields
# and raises sections signifying that we are currently parsing the
# corresponding section of that docstring.
DOCSTRING_SECTION_RETURNS = 'returns'
DOCSTRING_SECTION_YIELDS = 'yields'
DOCSTRING_SECTION_RAISES = 'raises'
def visit_classdef(self, node):
"""Visit each class definition in a module and check if there is a
single new line below each class docstring.
Args:
node: astroid.nodes.ClassDef. Node for a class definition
in the AST.
"""
# Check if the given node has docstring.
if node.doc is None:
return
line_number = node.fromlineno
# Iterate till the start of docstring.
while True:
line = linecache.getline(node.root().file, line_number).strip()
if line.startswith((b'"""', b'\'\'\'', b'\'', b'"')):
break
else:
line_number += 1
doc_length = len(node.doc.split(b'\n'))
line_number += doc_length
first_line_after_doc = linecache.getline(
node.root().file, line_number).strip()
second_line_after_doc = linecache.getline(
node.root().file, line_number + 1).strip()
if first_line_after_doc != b'':
self.add_message('newline-below-class-docstring', node=node)
elif second_line_after_doc == b'':
self.add_message('newline-below-class-docstring', node=node)
def visit_functiondef(self, node):
"""Called for function and method definitions (def).
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
node_doc = docstrings_checker.docstringify(node.doc)
self.check_functiondef_params(node, node_doc)
self.check_functiondef_returns(node, node_doc)
self.check_functiondef_yields(node, node_doc)
self.check_docstring_style(node)
self.check_docstring_section_indentation(node)
self.check_typeinfo(node, node_doc)
def check_typeinfo(self, node, node_doc):
"""Checks whether all parameters in a function definition are
properly formatted.
Args:
node: astroid.node.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
# The regexes are taken from the pylint codebase and are modified
# according to our needs. Link: https://github.com/PyCQA/pylint/blob/
# e89c361668aeead9fd192d5289c186611ef779ca/pylint/extensions/
# _check_docs_utils.py#L428.
re_param_line = re.compile(
r"""
\s* \*{{0,2}}(\w+) # identifier potentially with asterisks
\s* ( [:]
\s*
({type}|\S*|[\s\S]*)
(?:,\s+optional)?
[.]+\s )+ \s*
\s* [A-Z0-9](.*)[.\]}}\)]+$ # beginning of optional description
""".format(
type=_check_docs_utils.GoogleDocstring.re_multiple_type,
), flags=re.X | re.S | re.M)
re_returns_line = re.compile(
r"""
\s* (({type}|\S*|[\s\S]*).[.]+\s)+ # identifier
\s* [A-Z0-9](.*)[.\]}}\)]+$ # beginning of description
""".format(
type=_check_docs_utils.GoogleDocstring.re_multiple_type,
), flags=re.X | re.S | re.M)
re_yields_line = re_returns_line
re_raise_line = re.compile(
r"""
\s* ({type}[.])+ # identifier
\s* [A-Z0-9](.*)[.\]}}\)]+$ # beginning of description
""".format(
type=_check_docs_utils.GoogleDocstring.re_multiple_type,
), flags=re.X | re.S | re.M)
        # To extract the information from the Args section we need to use
        # _parse_section, which returns all the argument entries. Since it is
        # a private method, we use a pylint pragma to suppress the
        # protected-access warning.
if node_doc.has_params():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_param_section)
for entry in entries:
if entry.lstrip().startswith('*args') and not (
entry.lstrip().startswith('*args: list(*)')):
self.add_message('malformed-args-argument', node=node)
match = re_param_line.match(entry)
if not match:
self.add_message('malformed-args-section', node=node)
        # To extract the information from the Returns section we need to use
        # _parse_section, which returns all its entries. Since it is a
        # private method, we use a pylint pragma to suppress the
        # protected-access warning.
if node_doc.has_returns():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_returns_section)
entries = [''.join(entries)]
for entry in entries:
match = re_returns_line.match(entry)
if not match:
self.add_message('malformed-returns-section', node=node)
        # To extract the information from the Yields section we need to use
        # _parse_section, which returns all its entries. Since it is a
        # private method, we use a pylint pragma to suppress the
        # protected-access warning.
if node_doc.has_yields():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_yields_section)
entries = [''.join(entries)]
for entry in entries:
match = re_yields_line.match(entry)
if not match:
self.add_message('malformed-yields-section', node=node)
        # To extract the exceptions from the Raises section we need to use
        # _parse_section, which returns all its entries. Since it is a
        # private method, we use a pylint pragma to suppress the
        # protected-access warning.
if node_doc.exceptions():
entries = node_doc._parse_section( # pylint: disable=protected-access
_check_docs_utils.GoogleDocstring.re_raise_section)
for entry in entries:
match = re_raise_line.match(entry)
if not match:
self.add_message('malformed-raises-section', node=node)
def check_functiondef_params(self, node, node_doc):
"""Checks whether all parameters in a function definition are
documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
node_allow_no_param = None
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
class_doc = docstrings_checker.docstringify(class_node.doc)
self.check_single_constructor_params(
class_doc, node_doc, class_node)
# __init__ or class docstrings can have no parameters documented
# as long as the other documents them.
node_allow_no_param = (
class_doc.has_params() or
class_doc.params_documented_elsewhere() or
None
)
class_allow_no_param = (
node_doc.has_params() or
node_doc.params_documented_elsewhere() or
None
)
self.check_arguments_in_docstring(
class_doc, node.args, class_node,
accept_no_param_doc=class_allow_no_param)
self.check_arguments_in_docstring(
node_doc, node.args, node,
accept_no_param_doc=node_allow_no_param)
def check_docstring_style(self, node):
"""It fetches a function node and extract the class node from function
node if it is inside a class body and passes it to
check_docstring_structure which checks whether the docstring has a
space at the beginning and a period at the end.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
self.check_docstring_structure(class_node)
self.check_docstring_structure(node)
def check_newline_above_args(self, node, docstring):
"""Checks to ensure that there is a single space above the
argument parameters in the docstring.
Args:
node: astroid.node.Function. Node for a function or method
definition in the AST.
            docstring: list(str). Function docstring split by newlines.
"""
blank_line_counter = 0
for line in docstring:
line = line.strip()
if line == b'':
blank_line_counter += 1
if blank_line_counter == 0 or blank_line_counter > 1:
if line == b'Args:':
self.add_message(
'single-space-above-args', node=node)
elif line == b'Returns:':
self.add_message(
'single-space-above-returns', node=node)
elif line == b'Raises:':
self.add_message(
'single-space-above-raises', node=node)
elif line == b'Yields:':
self.add_message(
'single-space-above-yield', node=node)
if line != b'':
blank_line_counter = 0
def check_docstring_structure(self, node):
"""Checks whether the docstring has the correct structure i.e.
do not have space at the beginning and have a period at the end of
docstring.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if node.doc:
docstring = node.doc.splitlines()
# Check for space after """ in docstring.
if docstring[0][0] == b' ':
self.add_message('space-after-triple-quote', node=node)
# Check if single line docstring span two lines.
if len(docstring) == 2 and docstring[-1].strip() == b'':
self.add_message(
'single-line-docstring-span-two-lines', node=node)
# Check for punctuation at end of a single line docstring.
elif (len(docstring) == 1 and docstring[-1][-1] not in
ALLOWED_TERMINATING_PUNCTUATIONS):
self.add_message('no-period-used', node=node)
# Check for punctuation at the end of a multiline docstring.
elif len(docstring) > 1:
if docstring[-2].strip() == b'':
self.add_message('empty-line-before-end', node=node)
elif docstring[-1].strip() != b'':
self.add_message(
'no-newline-used-at-end', node=node)
elif (docstring[-2][-1] not in
ALLOWED_TERMINATING_PUNCTUATIONS and not
any(word in docstring[-2] for word in EXCLUDED_PHRASES)):
self.add_message('no-period-used', node=node)
def check_docstring_section_indentation(self, node):
"""Checks whether the function argument definitions ("Args": section,
"Returns": section, "Yield": section, "Raises: section) are indented
properly. Parameters should be indented by 4 relative to the 'Args:'
'Return:', 'Raises:', 'Yield:' line and any wrap-around descriptions
should be indented by 8.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
arguments_node = node.args
expected_argument_names = set(
None if (arg.name in self.not_needed_param_in_docstring)
else (arg.name + ':') for arg in arguments_node.args)
currently_in_args_section = False
# When we are in the args section and a line ends in a colon,
# we can ignore the indentation styling in the next section of
# description, hence a freeform section.
currently_in_freeform_section = False
args_indentation = 0
if node.doc:
current_docstring_section = None
in_description = False
args_indentation_in_spaces = 0
docstring = node.doc.splitlines()
self.check_newline_above_args(node, docstring)
for line in docstring:
stripped_line = line.lstrip()
current_line_indentation = (
len(line) - len(stripped_line))
parameter = re.search(
'^[^:]+:',
stripped_line)
# Check for empty lines and ignore them.
if len(line.strip()) == 0:
continue
# If line starts with Returns: , it is the header of a Returns
# subsection.
if stripped_line.startswith('Returns:'):
current_docstring_section = (
self.DOCSTRING_SECTION_RETURNS)
in_freeform_section = False
in_description = False
args_indentation_in_spaces = current_line_indentation
# If line starts with Raises: , it is the header of a Raises
# subsection.
elif stripped_line.startswith('Raises:'):
current_docstring_section = (
self.DOCSTRING_SECTION_RAISES)
in_freeform_section = False
in_description = False
args_indentation_in_spaces = current_line_indentation
# If line starts with Yields: , it is the header of a Yields
# subsection.
elif stripped_line.startswith('Yields:'):
current_docstring_section = (
self.DOCSTRING_SECTION_YIELDS)
in_freeform_section = False
in_description = False
args_indentation_in_spaces = current_line_indentation
# Check if we are in a docstring raises section.
elif (current_docstring_section and
(current_docstring_section ==
self.DOCSTRING_SECTION_RAISES)):
# In the raises section, if we see this regex expression, we
# can assume it's the start of a new parameter definition.
# We check the indentation of the parameter definition.
if re.search(br'^[a-zA-Z0-9_\.\*]+[.] ',
stripped_line):
if current_line_indentation != (
args_indentation_in_spaces + 4):
self.add_message(
'4-space-indentation-in-docstring',
node=node)
in_description = True
# In a description line that is wrapped around (doesn't
# start off with the parameter name), we need to make sure
# the indentation is 8.
elif in_description:
if current_line_indentation != (
args_indentation_in_spaces + 8):
self.add_message(
'8-space-indentation-in-docstring',
node=node)
# Check if we are in a docstring returns or yields section.
# NOTE: Each function should only have one yield or return
# object. If a tuple is returned, wrap both in a tuple parameter
# section.
elif (current_docstring_section and
(current_docstring_section ==
self.DOCSTRING_SECTION_RETURNS)
or (current_docstring_section ==
self.DOCSTRING_SECTION_YIELDS)):
# Check for the start of a new parameter definition in the
# format "type (elaboration)." and check the indentation.
if (re.search(br'^[a-zA-Z_() -:,\*]+\.',
stripped_line) and not in_description):
if current_line_indentation != (
args_indentation_in_spaces + 4):
self.add_message(
'4-space-indentation-in-docstring',
node=node)
# If the line ends with a colon, we can assume the rest
# of the section is free form.
if re.search(br':$', stripped_line):
in_freeform_section = True
in_description = True
# In a description line of a returns or yields, we keep the
# indentation the same as the definition line.
elif in_description:
if (current_line_indentation != (
args_indentation_in_spaces + 4)
and not in_freeform_section):
self.add_message(
'4-space-indentation-in-docstring',
node=node)
# If the description line ends with a colon, we can
# assume the rest of the section is free form.
if re.search(br':$', stripped_line):
in_freeform_section = True
# Check for the start of an Args: section and check the correct
# indentation.
elif stripped_line.startswith('Args:'):
args_indentation = current_line_indentation
# The current args indentation is incorrect.
if current_line_indentation % 4 != 0:
self.add_message(
'incorrect-indentation-for-arg-header-doc',
node=node)
# Since other checks are based on relative indentation,
# we need to fix this indentation first.
break
currently_in_args_section = True
                # Check for a parameter section header by checking that the
                # parameter is in the function arguments set. We also check
                # for arguments that start with *, since those do not appear
                # in the node args list and need to be handled separately.
elif (currently_in_args_section and parameter
and ((
parameter.group(0).strip('*')
in expected_argument_names) or
re.search(
br'\*[^ ]+: ',
stripped_line))):
words_in_line = stripped_line.split(' ')
currently_in_freeform_section = False
# Check if the current parameter section indentation is
# correct.
if current_line_indentation != (
args_indentation + 4):
# Use the first word in the line to identify the error.
beginning_of_line = (
words_in_line[0]
if words_in_line else None)
self.add_message(
'4-space-indentation-for-arg-parameters-doc',
node=node,
args=(beginning_of_line))
# If the line ends with a colon, that means
# the next subsection of description is free form.
if line.endswith(':'):
currently_in_freeform_section = True
# All other lines can be treated as description.
elif currently_in_args_section:
# If it is not a freeform section, we check the indentation.
words_in_line = stripped_line.split(' ')
if (not currently_in_freeform_section
and current_line_indentation != (
args_indentation + 8)):
# Use the first word in the line to identify the error.
beginning_of_line = (
words_in_line[0]
if words_in_line else None)
self.add_message(
'8-space-indentation-for-arg-in-descriptions-doc',
node=node,
args=(beginning_of_line))
# If the line ends with a colon, that
# means the next subsection of description is free form.
if line.endswith(':'):
currently_in_freeform_section = True
def check_functiondef_returns(self, node, node_doc):
"""Checks whether a function documented with a return value actually has
a return statement in its definition.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
if not node_doc.supports_yields and node.is_generator():
return
return_nodes = node.nodes_of_class(astroid.Return)
if ((
node_doc.has_returns() or node_doc.has_rtype()) and
not any(
docstrings_checker.returns_something(
ret_node) for ret_node in return_nodes)):
self.add_message(
'redundant-returns-doc',
node=node)
def check_functiondef_yields(self, node, node_doc):
"""Checks whether a function documented with a yield value actually has
a yield statement in its definition.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
if not node_doc.supports_yields:
return
if ((node_doc.has_yields() or node_doc.has_yields_type()) and
not node.is_generator()):
self.add_message(
'redundant-yields-doc',
node=node)
def visit_raise(self, node):
"""Visits a function node that raises an exception and verifies that all
exceptions raised in the function definition are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
expected_excs = docstrings_checker.possible_exc_types(node)
if not expected_excs:
return
if not func_node.doc:
# If this is a property setter,
# the property should have the docstring instead.
setters_property = docstrings_checker.get_setters_property(
func_node)
if setters_property:
func_node = setters_property
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid():
if doc.doc:
self._handle_no_raise_doc(expected_excs, func_node)
return
found_excs = doc.exceptions()
missing_excs = expected_excs - found_excs
self._add_raise_message(missing_excs, func_node)
def visit_return(self, node):
"""Visits a function node that contains a return statement and verifies
that the return value and the return type are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if not docstrings_checker.returns_something(node):
return
func_node = node.frame()
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_return_doc:
return
is_property = checker_utils.decorated_with_property(func_node)
if not (doc.has_returns() or
(doc.has_property_returns() and is_property)):
self.add_message(
'missing-return-doc',
node=func_node
)
if not (doc.has_rtype() or
(doc.has_property_type() and is_property)):
self.add_message(
'missing-return-type-doc',
node=func_node
)
def visit_yield(self, node):
"""Visits a function node that contains a yield statement and verifies
that the yield value and the yield type are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
func_node = node.frame()
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_yields_doc:
return
doc_has_yields = doc.has_yields()
doc_has_yields_type = doc.has_yields_type()
if not doc_has_yields:
self.add_message(
'missing-yield-doc',
node=func_node
)
if not doc_has_yields_type:
self.add_message(
'missing-yield-type-doc',
node=func_node
)
def visit_yieldfrom(self, node):
"""Visits a function node that contains a yield from statement and
verifies that the yield from value and the yield from type are
documented.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
self.visit_yield(node)
def check_arguments_in_docstring(
self, doc, arguments_node, warning_node, accept_no_param_doc=None):
"""Check that all parameters in a function, method or class constructor
on the one hand and the parameters mentioned in the parameter
documentation (e.g. the Sphinx tags 'param' and 'type') on the other
hand are consistent with each other.
* Undocumented parameters except 'self' are noticed.
* Undocumented parameter types except for 'self' and the ``*<args>``
and ``**<kwargs>`` parameters are noticed.
* Parameters mentioned in the parameter documentation that don't or no
longer exist in the function parameter list are noticed.
* If the text "For the parameters, see" or "For the other parameters,
see" (ignoring additional whitespace) is mentioned in the docstring,
missing parameter documentation is tolerated.
* If there's no Sphinx style, Google style or NumPy style parameter
documentation at all, i.e. ``:param`` is never mentioned etc., the
checker assumes that the parameters are documented in another format
and the absence is tolerated.
Args:
doc: str. Docstring for the function, method or class.
arguments_node: astroid.scoped_nodes.Arguments. Arguments node
for the function, method or class constructor.
warning_node: astroid.scoped_nodes.Node. The node to assign
the warnings to.
accept_no_param_doc: bool|None. Whether or not to allow
no parameters to be documented. If None then
this value is read from the configuration.
"""
# Tolerate missing param or type declarations if there is a link to
# another method carrying the same name.
if not doc.doc:
return
if accept_no_param_doc is None:
accept_no_param_doc = self.config.accept_no_param_doc
tolerate_missing_params = doc.params_documented_elsewhere()
# Collect the function arguments.
expected_argument_names = set(
arg.name for arg in arguments_node.args)
expected_argument_names.update(
arg.name for arg in arguments_node.kwonlyargs)
not_needed_type_in_docstring = (
self.not_needed_param_in_docstring.copy())
if arguments_node.vararg is not None:
expected_argument_names.add(arguments_node.vararg)
not_needed_type_in_docstring.add(arguments_node.vararg)
if arguments_node.kwarg is not None:
expected_argument_names.add(arguments_node.kwarg)
not_needed_type_in_docstring.add(arguments_node.kwarg)
params_with_doc, params_with_type = doc.match_param_docs()
# Tolerate no parameter documentation at all.
if (not params_with_doc and not params_with_type
and accept_no_param_doc):
tolerate_missing_params = True
def _compare_missing_args(
found_argument_names, message_id, not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are arguments missing.
Args:
found_argument_names: set. Argument names found in the
docstring.
message_id: str. Pylint message id.
not_needed_names: set(str). Names that may be omitted.
"""
if not tolerate_missing_params:
missing_argument_names = (
(expected_argument_names - found_argument_names)
- not_needed_names)
if missing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(missing_argument_names)),),
node=warning_node)
def _compare_different_args(
found_argument_names, message_id, not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are extra arguments found.
Args:
found_argument_names: set. Argument names found in the
docstring.
message_id: str. Pylint message id.
not_needed_names: set(str). Names that may be omitted.
"""
differing_argument_names = (
(expected_argument_names ^ found_argument_names)
- not_needed_names - expected_argument_names)
if differing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(differing_argument_names)),),
node=warning_node)
_compare_missing_args(
params_with_doc, 'missing-param-doc',
self.not_needed_param_in_docstring)
_compare_missing_args(
params_with_type, 'missing-type-doc', not_needed_type_in_docstring)
_compare_different_args(
params_with_doc, 'differing-param-doc',
self.not_needed_param_in_docstring)
_compare_different_args(
params_with_type, 'differing-type-doc',
not_needed_type_in_docstring)
def check_single_constructor_params(self, class_doc, init_doc, class_node):
"""Checks whether a class and corresponding init() method are
documented. If both of them are documented, it adds an error message.
Args:
class_doc: Docstring. Pylint docstring class instance representing
a class's docstring.
init_doc: Docstring. Pylint docstring class instance representing
a method's docstring, the method here is the constructor method
for the above class.
class_node: astroid.scoped_nodes.Function. Node for class definition
in AST.
"""
if class_doc.has_params() and init_doc.has_params():
self.add_message(
'multiple-constructor-doc',
args=(class_node.name,),
node=class_node)
def _handle_no_raise_doc(self, excs, node):
"""Checks whether the raised exception in a function has been
documented, add a message otherwise.
Args:
excs: list(str). A list of exception types.
node: astroid.scoped_nodes.Function. Node to access module content.
"""
if self.config.accept_no_raise_doc:
return
self._add_raise_message(excs, node)
def _add_raise_message(self, missing_excs, node):
"""Adds a message on :param:`node` for the missing exception type.
Args:
missing_excs: list(Exception). A list of missing exception types.
node: astroid.node_classes.NodeNG. The node show the message on.
"""
if not missing_excs:
return
self.add_message(
'missing-raises-doc',
args=(', '.join(sorted(missing_excs)),),
node=node)
class ImportOnlyModulesChecker(checkers.BaseChecker):
"""Checker for import-from statements. It checks that
modules are only imported.
"""
__implements__ = interfaces.IAstroidChecker
name = 'import-only-modules'
priority = -1
msgs = {
'C0003': (
'Import \"%s\" from \"%s\" is not a module.',
'import-only-modules',
'Modules should only be imported.',
),
}
@checker_utils.check_messages('import-only-modules')
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and checks that
modules are imported. It then adds a message accordingly.
Args:
node: astroid.node_classes.ImportFrom. Node for a import-from
statement in the AST.
"""
try:
imported_module = node.do_import_module(node.modname)
except astroid.AstroidBuildingException:
return
if node.level is None:
modname = node.modname
else:
modname = '.' * node.level + node.modname
for (name, _) in node.names:
if name == 'constants':
continue
try:
imported_module.import_module(name, True)
except astroid.AstroidImportError:
self.add_message(
'import-only-modules',
node=node,
args=(name, modname),
)
class BackslashContinuationChecker(checkers.BaseChecker):
"""Custom pylint checker which checks that backslash is not used
for continuation.
"""
__implements__ = interfaces.IRawChecker
name = 'backslash-continuation'
priority = -1
msgs = {
'C0004': (
(
'Backslash should not be used to break continuation lines. '
'Use braces to break long lines.'),
'backslash-continuation',
'Use braces to break long lines instead of backslash.'
),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
for (line_num, line) in enumerate(file_content):
if line.rstrip(b'\r\n').endswith(b'\\'):
self.add_message(
'backslash-continuation', line=line_num + 1)
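# Hedged illustration (added in this write-up, not part of the original
# checker set): the continuation style BackslashContinuationChecker reports
# versus the brace-based alternative it recommends. Kept inside a string so
# that loading the plugin does not change its behaviour.
_BACKSLASH_CONTINUATION_EXAMPLE = (
    'total = first_value + \\\n'
    '    second_value                 # flagged: backslash-continuation\n'
    'total = (\n'
    '    first_value + second_value)  # preferred: brackets break the line\n'
)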
class FunctionArgsOrderChecker(checkers.BaseChecker):
"""Custom pylint checker which checks the order of arguments in function
definition.
"""
__implements__ = interfaces.IAstroidChecker
name = 'function-args-order'
priority = -1
msgs = {
'C0005': (
'Wrong order of arguments in function definition '
'\'self\' should come first.',
'function-args-order-self',
'\'self\' should come first',),
'C0006': (
'Wrong order of arguments in function definition '
'\'cls\' should come first.',
'function-args-order-cls',
'\'cls\' should come first'),
}
def visit_functiondef(self, node):
"""Visits every function definition in the python file and check the
function arguments order. It then adds a message accordingly.
Args:
node: astroid.scoped_nodes.Function. Node for a function or method
definition in the AST.
"""
args_list = [args.name for args in node.args.args]
if 'self' in args_list and args_list[0] != 'self':
self.add_message('function-args-order-self', node=node)
elif 'cls' in args_list and args_list[0] != 'cls':
self.add_message('function-args-order-cls', node=node)
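# Hedged illustration (added in this write-up, not part of the original
# checker set): the argument orders FunctionArgsOrderChecker reports and
# accepts. Kept inside a string so that loading the plugin does not change
# its behaviour.
_FUNCTION_ARGS_ORDER_EXAMPLE = (
    'def update(new_value, self): ...   # flagged: function-args-order-self\n'
    'def update(self, new_value): ...   # accepted: self comes first\n'
)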
class RestrictedImportChecker(checkers.BaseChecker):
"""Custom pylint checker which checks layers importing modules
from their respective restricted layers.
"""
__implements__ = interfaces.IAstroidChecker
name = 'invalid-import'
priority = -1
msgs = {
'C0009': (
'Importing %s layer in %s layer is prohibited.',
'invalid-import',
            'Storage layer and domain layer must not import '
'domain layer and controller layer respectively.'),
}
def visit_import(self, node):
"""Visits every import statement in the file.
Args:
node: astroid.node_classes.Import. Node for a import statement
in the AST.
"""
modnode = node.root()
names = [name for name, _ in node.names]
# Checks import of domain layer in storage layer.
if 'oppia.core.storage' in modnode.name and not '_test' in modnode.name:
if any('core.domain' in name for name in names):
self.add_message(
'invalid-import',
node=node,
args=('domain', 'storage'),
)
# Checks import of controller layer in domain layer.
if 'oppia.core.domain' in modnode.name and not '_test' in modnode.name:
if any('core.controllers' in name for name in names):
self.add_message(
'invalid-import',
node=node,
args=('controller', 'domain'),
)
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and checks that
modules are imported. It then adds a message accordingly.
Args:
node: astroid.node_classes.ImportFrom. Node for a import-from
statement in the AST.
"""
modnode = node.root()
if 'oppia.core.storage' in modnode.name and not '_test' in modnode.name:
if 'core.domain' in node.modname:
self.add_message(
'invalid-import',
node=node,
args=('domain', 'storage'),
)
if 'oppia.core.domain' in modnode.name and not '_test' in modnode.name:
if 'core.controllers' in node.modname:
self.add_message(
'invalid-import',
node=node,
args=('controller', 'domain'),
)
class SingleCharAndNewlineAtEOFChecker(checkers.BaseChecker):
"""Checker for single character files and newline at EOF."""
__implements__ = interfaces.IRawChecker
name = 'newline-at-eof'
priority = -1
msgs = {
'C0007': (
'Files should end in a single newline character.',
'newline-at-eof',
'Please enter a single newline at the end of the file.'),
'C0008': (
'Only one character in file',
'only-one-character',
'Files with only one character are not allowed.'),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
file_length = len(file_content)
if file_length == 1 and len(file_content[0]) == 1:
self.add_message('only-one-character', line=file_length)
if file_length >= 2 and not re.search(r'[^\n]\n', file_content[-1]):
self.add_message('newline-at-eof', line=file_length)
class SingleSpaceAfterYieldChecker(checkers.BaseChecker):
"""Checks if only one space is used after a yield statement
when applicable ('yield' is acceptable).
"""
__implements__ = interfaces.IAstroidChecker
name = 'single-space-after-yield'
priority = -1
msgs = {
'C0010': (
'Not using \'yield\' or a single space after yield statement.',
'single-space-after-yield',
'Ensure a single space is used after yield statement.',
),
}
def visit_yield(self, node):
"""Visit every yield statement to ensure that yield keywords are
followed by exactly one space, so matching 'yield *' where * is not a
whitespace character. Note that 'yield' is also acceptable in
cases where the user wants to yield nothing.
Args:
            node: astroid.nodes.Yield. Node to access the yield statement.
"""
line_number = node.fromlineno
line = linecache.getline(node.root().file, line_number).lstrip()
if (line.startswith(b'yield') and
not re.search(br'^(yield)( \S|$|\w)', line)):
self.add_message('single-space-after-yield', node=node)
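# Hedged illustration (added in this write-up, not part of the original
# checker set): spacing that SingleSpaceAfterYieldChecker reports and
# accepts. Kept inside a string so that loading the plugin does not change
# its behaviour.
_SINGLE_SPACE_AFTER_YIELD_EXAMPLE = (
    'yield  item   # flagged: two spaces after the yield keyword\n'
    'yield item    # accepted: exactly one space\n'
    'yield         # accepted: yielding nothing\n'
)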
class DivisionOperatorChecker(checkers.BaseChecker):
"""Checks if division operator is used."""
__implements__ = interfaces.IAstroidChecker
name = 'division-operator-used'
priority = -1
msgs = {
'C0015': (
'Division Operator is used.',
'division-operator-used',
'Please use python_utils.divide() instead of the "/" operator'
)
}
def visit_binop(self, node):
"""Visit assign statements to ensure that the division operator('/')
is not used and python_utils.divide() is used instead.
Args:
node: astroid.node.BinOp. Node to access module content.
"""
if node.op == b'/':
self.add_message(
'division-operator-used', node=node)
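# Hedged illustration (added in this write-up, not part of the original
# checker set): the expression DivisionOperatorChecker reports and the helper
# it asks for instead. Kept inside a string so that loading the plugin does
# not change its behaviour.
_DIVISION_OPERATOR_EXAMPLE = (
    'ratio = total / count                      # flagged: division-operator-used\n'
    'ratio = python_utils.divide(total, count)  # preferred helper\n'
)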
class SingleLineCommentChecker(checkers.BaseChecker):
"""Checks if comments follow correct style."""
__implements__ = interfaces.ITokenChecker
name = 'incorrectly_styled_comment'
priority = -1
msgs = {
'C0016': (
'Invalid punctuation is used.',
'invalid-punctuation-used',
'Please use valid punctuation.'
),
'C0017': (
'No space is used at beginning of comment.',
'no-space-at-beginning',
'Please use single space at the beginning of comment.'
),
'C0018': (
'No capital letter is used at the beginning of comment.',
'no-capital-letter-at-beginning',
'Please use capital letter to begin the content of comment.'
)
}
options = ((
'allowed-comment-prefixes',
{
'default': ('int', 'str', 'float', 'bool', 'v'),
'type': 'csv', 'metavar': '<comma separated list>',
'help': 'List of allowed prefixes in a comment.'
}
),)
def _check_space_at_beginning_of_comments(self, line, line_num):
"""Checks if the comment starts with a space.
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
if re.search(br'^#[^\s].*$', line) and not line.startswith(b'#!'):
self.add_message(
'no-space-at-beginning', line=line_num)
def _check_comment_starts_with_capital_letter(self, line, line_num):
"""Checks if the comment starts with a capital letter.
Comments may include a lowercase character at the beginning only if they
start with version info or a data type or a variable name e.g.
"# next_line is of string type." or "# v2 version does not have
ExplorationStats Model." or "# int. The file size, in bytes.".
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
# Check if variable name is used.
if line[1:].startswith(b' '):
starts_with_underscore = '_' in line.split()[1]
else:
starts_with_underscore = '_' in line.split()[0]
# Check if allowed prefix is used.
allowed_prefix_is_present = any(
line[2:].startswith(word) for word in
self.config.allowed_comment_prefixes)
# Check if comment contains any excluded phrase.
excluded_phrase_is_present = any(
line[1:].strip().startswith(word) for word in EXCLUDED_PHRASES)
if (re.search(br'^# [a-z].*', line) and not (
excluded_phrase_is_present or
starts_with_underscore or allowed_prefix_is_present)):
self.add_message(
'no-capital-letter-at-beginning', line=line_num)
def _check_punctuation(self, line, line_num):
"""Checks if the comment starts with a correct punctuation.
Args:
line: str. The current line of comment.
line_num: int. Line number of the current comment.
"""
excluded_phrase_is_present_at_end = any(
word in line for word in EXCLUDED_PHRASES)
# Comments must end with the proper punctuation.
last_char_is_invalid = line[-1] not in (
ALLOWED_TERMINATING_PUNCTUATIONS)
excluded_phrase_at_beginning_of_line = any(
line[1:].startswith(word) for word in EXCLUDED_PHRASES)
if (last_char_is_invalid and not (
excluded_phrase_is_present_at_end or
excluded_phrase_at_beginning_of_line)):
self.add_message('invalid-punctuation-used', line=line_num)
def process_tokens(self, tokens):
"""Custom pylint checker to ensure that comments follow correct style.
Args:
tokens: list(Token). Object to access all tokens of a module.
"""
prev_line_num = -1
comments_group_list = []
comments_index = -1
for (token_type, _, (line_num, _), _, line) in tokens:
if token_type == tokenize.COMMENT and line.strip().startswith('#'):
line = line.strip()
self._check_space_at_beginning_of_comments(line, line_num)
if prev_line_num + 1 == line_num:
comments_group_list[comments_index].append((line, line_num))
else:
comments_group_list.append([(line, line_num)])
comments_index += 1
prev_line_num = line_num
for comments in comments_group_list:
# Checks first line of comment.
self._check_comment_starts_with_capital_letter(*comments[0])
# Checks last line of comment.
self._check_punctuation(*comments[-1])
class BlankLineBelowFileOverviewChecker(checkers.BaseChecker):
"""Checks if there is a single empty line below the fileoverview docstring.
Note: The check assumes that all files have a file overview. This
assumption is justified because Pylint has an inbuilt check
(missing-docstring) for missing file overviews.
"""
__implements__ = interfaces.IAstroidChecker
name = 'space_between_imports_and_file-overview'
priority = -1
msgs = {
'C0024': (
'No empty line used below the fileoverview docstring.',
'no-empty-line-provided-below-fileoverview',
'please provide an empty line below the fileoverview.'
),
'C0025': (
'Single empty line should be provided below the fileoverview.',
'only-a-single-empty-line-should-be-provided',
'please provide an empty line below the fileoverview.'
)
}
def visit_module(self, node):
"""Visit a module to ensure that there is a blank line below
file overview docstring.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
# Check if the given node has docstring.
if node.doc is None:
return
line_number = node.fromlineno
# Iterate till the start of docstring.
while True:
line = linecache.getline(node.root().file, line_number).strip()
if line.startswith((b'\'', b'"')):
break
else:
line_number += 1
doc_length = len(node.doc.split(b'\n'))
line_number += doc_length
first_line_after_doc = linecache.getline(
node.root().file, line_number).strip()
second_line_after_doc = linecache.getline(
node.root().file, line_number + 1).strip()
if first_line_after_doc != b'':
self.add_message(
'no-empty-line-provided-below-fileoverview', node=node)
elif second_line_after_doc == b'':
self.add_message(
'only-a-single-empty-line-should-be-provided', node=node)
class SingleLinePragmaChecker(checkers.BaseChecker):
"""Custom pylint checker which checks if pylint pragma is used to disable
a rule for a single line only.
"""
__implements__ = interfaces.ITokenChecker
name = 'single-line-pragma'
priority = -1
msgs = {
'C0028': (
'Pylint pragmas should be used to disable a rule '
'for a single line only',
'single-line-pragma',
'Please use pylint pragmas to disable a rule for a single line only'
)
}
def process_tokens(self, tokens):
"""Custom pylint checker which allows paramas to disable a rule for a
single line only.
Args:
tokens: Token. Object to access all tokens of a module.
"""
for (token_type, _, (line_num, _), _, line) in tokens:
if token_type == tokenize.COMMENT:
line = line.lstrip()
# Ignore line that is enabling this check.
# Example:
# # pylint: disable=import-only-modules, single-line-pragma
# def func(a, b):
# # pylint: enable=import-only-modules, single-line-pragma
                # If we do not ignore the line with the 'enable' statement,
                # pylint will raise the single-line-pragma error because the
                # check is enabled again from that line onwards. So we need
                # to ignore this line.
if re.search(br'^(#\s*pylint:)', line):
if 'enable' in line and 'single-line-pragma' in line:
continue
self.add_message(
'single-line-pragma', line=line_num)
def register(linter):
"""Registers the checker with pylint.
Args:
linter: Pylinter. The Pylinter object.
"""
linter.register_checker(ExplicitKeywordArgsChecker(linter))
linter.register_checker(HangingIndentChecker(linter))
linter.register_checker(DocstringParameterChecker(linter))
linter.register_checker(ImportOnlyModulesChecker(linter))
linter.register_checker(BackslashContinuationChecker(linter))
linter.register_checker(FunctionArgsOrderChecker(linter))
linter.register_checker(RestrictedImportChecker(linter))
linter.register_checker(SingleCharAndNewlineAtEOFChecker(linter))
linter.register_checker(SingleSpaceAfterYieldChecker(linter))
linter.register_checker(DivisionOperatorChecker(linter))
linter.register_checker(SingleLineCommentChecker(linter))
linter.register_checker(BlankLineBelowFileOverviewChecker(linter))
linter.register_checker(SingleLinePragmaChecker(linter))
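# Hedged usage sketch (added in this write-up, not part of the original
# file): register() is the hook pylint calls for modules passed through
# --load-plugins. One way to run these checkers programmatically might look
# like the call below; the target file name is a made-up placeholder, and the
# __main__ guard keeps the plugin import side-effect free.
if __name__ == '__main__':
    from pylint import lint
    lint.Run([
        '--load-plugins=scripts.linters.pylint_extensions',
        'some_module.py',
    ])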
|
prasanna08/oppia
|
scripts/linters/pylint_extensions.py
|
Python
|
apache-2.0
| 78,064
|
# Copyright 2015-2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NWA_DEVICE_GDV = "GeneralDev"
NWA_DEVICE_TFW = "TenantFW"
NWA_AGENT_TOPIC = 'nwa_agent'
NWA_AGENT_TYPE = 'NEC NWA Agent'
NWA_FIREWALL_PLUGIN = 'NECNWAFWaaS'
# An incremental size used when the remaining size is zero.
NWA_GREENPOOL_ADD_SIZE = 32
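# Hedged usage sketch (added in this write-up, not part of the original
# file): one way NWA_GREENPOOL_ADD_SIZE could be applied, growing an eventlet
# GreenPool (eventlet.greenpool.GreenPool) by that increment once it has no
# free greenthreads left. The helper name and the growth policy are
# assumptions, not project code.
def ensure_greenpool_capacity(pool):
    """Grow the pool by NWA_GREENPOOL_ADD_SIZE when it is fully used."""
    if pool.free() == 0:
        pool.resize(pool.size + NWA_GREENPOOL_ADD_SIZE)
    return pool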
|
openstack/networking-nec
|
networking_nec/nwa/common/constants.py
|
Python
|
apache-2.0
| 877
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', 'flag=ceph'],
[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.clone_vm, 'vm1', 'vm2'],
[TestAction.create_volume_backup, 'volume2', 'volume2-backup1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_backup, 'volume2-backup1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
[TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot9'],
[TestAction.clone_vm, 'vm1', 'vm3', 'full'],
[TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
[TestAction.stop_vm, 'vm2'],
[TestAction.change_vm_image, 'vm2'],
[TestAction.delete_vm_snapshot, 'vm2-snapshot9'],
])
'''
The final status:
Running:['vm1', 'vm3']
Stopped:['vm2']
Enabled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume2-backup1']
attached:['volume1', 'volume2', 'volume3', 'clone@volume1', 'clone@volume2', 'clone@volume3']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'vm2-snapshot9']
Expunged:[]
Ha:[]
Group:
'''
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/vm_snapshots/paths/xc_path8.py
|
Python
|
apache-2.0
| 1,642
|
"""
JsonCli: Library for CLI based on JSON
--------------------------------------
+------------------------+-------------+
| This is the JsonCli common library. |
+------------------------+-------------+
"""
import argparse
from collections import OrderedDict
from argcomplete import autocomplete
from botocore import xform_name
type_map = {
'structure': str,
'map': str,
'timestamp': str,
'list': str,
'string': str,
'float': float,
'integer': int,
'long': int,
'boolean': bool,
'double': float,
'blob': str}
class OrderNamespace(argparse.Namespace):
"""
Namespace with Order: from argparse.Namespace
"""
__order__ = OrderedDict()
def __init__(self, **kwargs):
super(OrderNamespace, self).__init__(**kwargs)
def __setattr__(self, attr, value):
if value is not None:
self.__order__[attr] = value
super(OrderNamespace, self).__setattr__(attr, value)
def add_arguments(group, args):
"""
Add Arguments to CLI
"""
for kkk, vvv in args.iteritems():
if 'type' in vvv and vvv['type'] in type_map:
vvv['type'] = type_map[vvv['type']]
if 'help' in vvv and not vvv['help']:
vvv['help'] = argparse.SUPPRESS
changed = xform_name(kkk, "-")
if kkk != changed:
kkk = "-".join(["", changed])
group.add_argument(kkk, **vvv)
return group
def recursive_parser(parser, args):
"""
Recursive CLI Parser
"""
subparser = parser.add_subparsers(help=args.get(
'__help__', ''), dest=args.get('__dest__', ''))
for k, v in args.iteritems():
if k == '__help__' or k == '__dest__':
continue
group = subparser.add_parser(k, help=v.get('help', ''))
for kk, vv in v.iteritems():
if kk == 'Subparsers':
group = recursive_parser(group, vv)
elif kk == 'Arguments':
group = add_arguments(group, vv)
return parser
def parse_args(args):
"""
Create the Command Line Interface
:type args: dict
:param args: describes the command structure for the CLI
"""
parser = argparse.ArgumentParser(description=args.get('Description', ''))
for k, v in args.iteritems():
if k == 'Subparsers':
parser = recursive_parser(parser, v)
elif k == 'Arguments':
parser = add_arguments(parser, v)
autocomplete(parser)
return parser.parse_args(None, OrderNamespace())
# vim: tabstop=4 shiftwidth=4 softtabstop=4
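# Hedged usage sketch (added in this write-up, not part of the original
# file): a minimal command description that parse_args() can consume. The
# description, argument name and help text are invented for the example.
if __name__ == '__main__':
    demo_cli = {
        'Description': 'Demo CLI built from a dict-based command description',
        'Arguments': {
            'Count': {'type': 'integer', 'help': 'how many items to show'},
        },
    }
    print(parse_args(demo_cli))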
|
henrysher/opslib
|
opslib/icsutils/jsoncli.py
|
Python
|
apache-2.0
| 2,568
|
class Hayvan:
def __init__(self,isim, renk):
self.isim=isim
self.renk=renk
def yuru(self):
print(self.isim+" yurumeye basladi")
def ye(self):
print(self.isim+" yemeye basladi")
class Fare(Hayvan):
def __init__(self,isim,renk):
super().__init__(isim,renk)
def yuru(self):
print(self.isim+" hizlica yurudu")
my_fare=Fare("siyah Avrasya sert sicani ","mavi")
my_fare.yuru()
my_fare.ye()
|
zinderud/ysa
|
python/first/inheritance.py
|
Python
|
apache-2.0
| 468
|
# Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from openstackclient.common import clientmanager
from openstackclient.tests import utils
class Container(object):
attr = clientmanager.ClientCache(lambda x: object())
def __init__(self):
pass
class TestClientManager(utils.TestCase):
def setUp(self):
super(TestClientManager, self).setUp()
def test_singleton(self):
# NOTE(dtroyer): Verify that the ClientCache descriptor only invokes
# the factory one time and always returns the same value after that.
c = Container()
self.assertEqual(c.attr, c.attr)
|
citrix-openstack-build/python-openstackclient
|
openstackclient/tests/common/test_clientmanager.py
|
Python
|
apache-2.0
| 1,180
|
from . import statuscodes
from .exceptions import *
from . import basedatamodel
|
Zashel/zrest
|
zrest/__init__.py
|
Python
|
apache-2.0
| 80
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc import db
from .mixins import Mapping
class SectionObjective(Mapping, db.Model):
__tablename__ = 'section_objectives'
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint('section_id', 'objective_id'),
db.Index('ix_section_id', 'section_id'),
db.Index('ix_objective_id', 'objective_id'),
)
section_id = db.Column(db.Integer, db.ForeignKey('sections.id'), nullable = False)
objective_id = db.Column(db.Integer, db.ForeignKey('objectives.id'), nullable = False)
_publish_attrs = [
'section',
'objective',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(SectionObjective, cls).eager_query()
return query.options(
orm.subqueryload('section'),
orm.subqueryload('objective'))
def _display_name(self):
return self.section.display_name + '<->' + self.objective.display_name
|
vladan-m/ggrc-core
|
src/ggrc/models/section_objective.py
|
Python
|
apache-2.0
| 1,173
|
#!/usr/bin/env python
# coding=utf-8
import threading
import time
class timer(threading.Thread): #The timer class is derived from the class threading.Thread
def __init__(self, num, interval):
threading.Thread.__init__(self)
self.thread_num = num
self.interval = interval
self.thread_stop = False
    def run(self):  # Override run(); put what you want the thread to do here
        while not self.thread_stop:
            print 'Thread Object(%d), Time:%s\n' % (self.thread_num, time.ctime())
time.sleep(self.interval)
def stop(self):
self.thread_stop = True
def test():
thread1 = timer(1, 1)
thread2 = timer(2, 2)
thread1.start()
thread2.start()
time.sleep(10)
thread1.stop()
thread2.stop()
return
if __name__ == '__main__':
test()
|
zhaochl/python-utils
|
utils/thread/time_thread.py
|
Python
|
apache-2.0
| 846
|
from __future__ import unicode_literals
from .request import Request
from .response import Response
from .stat import Stat
from .primitives import Bool, UString, Vector
class GetChildrenRequest(Request):
"""
"""
opcode = 8
parts = (
("path", UString),
("watch", Bool),
)
class GetChildrenResponse(Response):
"""
"""
opcode = 8
parts = (
("children", Vector.of(UString)),
)
class GetChildren2Request(Request):
"""
"""
opcode = 12
parts = (
("path", UString),
("watch", Bool),
)
class GetChildren2Response(Response):
"""
"""
opcode = 12
parts = (
("children", Vector.of(UString)),
("stat", Stat),
)
|
wglass/zoonado
|
zoonado/protocol/children.py
|
Python
|
apache-2.0
| 746
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(name='scalegrease',
version='1',
url='https://github.com/spotify/scalegrease',
description='A tool chain for executing batch processing jobs',
packages=['scalegrease'],
data_files=[('/etc', ['conf/scalegrease.json'])],
scripts=[
'bin/greaserun',
'bin/greaseworker'
]
)
|
sisidra/scalegrease
|
setup.py
|
Python
|
apache-2.0
| 415
|
from moto.core.responses import BaseResponse
class ReservedInstances(BaseResponse):
def cancel_reserved_instances_listing(self):
if self.is_not_dryrun("CancelReservedInstances"):
raise NotImplementedError(
"ReservedInstances.cancel_reserved_instances_listing is not yet implemented"
)
def create_reserved_instances_listing(self):
if self.is_not_dryrun("CreateReservedInstances"):
raise NotImplementedError(
"ReservedInstances.create_reserved_instances_listing is not yet implemented"
)
def describe_reserved_instances(self):
raise NotImplementedError(
"ReservedInstances.describe_reserved_instances is not yet implemented"
)
def describe_reserved_instances_listings(self):
raise NotImplementedError(
"ReservedInstances.describe_reserved_instances_listings is not yet implemented"
)
def describe_reserved_instances_offerings(self):
raise NotImplementedError(
"ReservedInstances.describe_reserved_instances_offerings is not yet implemented"
)
def purchase_reserved_instances_offering(self):
if self.is_not_dryrun("PurchaseReservedInstances"):
raise NotImplementedError(
"ReservedInstances.purchase_reserved_instances_offering is not yet implemented"
)
|
spulec/moto
|
moto/ec2/responses/reserved_instances.py
|
Python
|
apache-2.0
| 1,409
|
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
class Notification(object):
"""Notification class."""
DEVICE_ID_KEY = 'deviceId'
ID_KEY = 'id'
NOTIFICATION_KEY = 'notification'
PARAMETERS_KEY = 'parameters'
TIMESTAMP_KEY = 'timestamp'
def __init__(self, notification):
self._device_id = notification[self.DEVICE_ID_KEY]
self._id = notification[self.ID_KEY]
self._notification = notification[self.NOTIFICATION_KEY]
self._parameters = notification[self.PARAMETERS_KEY]
self._timestamp = notification[self.TIMESTAMP_KEY]
@property
def device_id(self):
return self._device_id
@property
def id(self):
return self._id
@property
def notification(self):
return self._notification
@property
def parameters(self):
return self._parameters
@property
def timestamp(self):
return self._timestamp
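# --- Hedged usage sketch added by the editor; not part of the original file ---
# Shows the dict shape Notification expects; all field values are invented.
if __name__ == '__main__':
    example = Notification({
        'deviceId': 'example-device',
        'id': 1,
        'notification': 'temperature',
        'parameters': {'value': 21.5},
        'timestamp': '2018-01-01T00:00:00.000',
    })
    print(example.device_id, example.notification, example.parameters)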
|
devicehive/devicehive-python
|
devicehive/notification.py
|
Python
|
apache-2.0
| 1,546
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import unittest
from collections import OrderedDict
from unittest.mock import patch
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.apache.hive.operators.hive_stats import HiveStatsCollectionOperator
from tests.providers.apache.hive import DEFAULT_DATE, DEFAULT_DATE_DS, TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveMetastoreHook, MockMySqlHook, MockPrestoHook
class _FakeCol:
def __init__(self, col_name, col_type):
self.name = col_name
self.type = col_type
fake_col = _FakeCol('col', 'string')
class TestHiveStatsCollectionOperator(TestHiveEnvironment):
def setUp(self):
self.kwargs = dict(
table='table',
partition=dict(col='col', value='value'),
metastore_conn_id='metastore_conn_id',
presto_conn_id='presto_conn_id',
mysql_conn_id='mysql_conn_id',
task_id='test_hive_stats_collection_operator',
)
super().setUp()
def test_get_default_exprs(self):
col = 'col'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {(col, 'non_null'): f'COUNT({col})'}
def test_get_default_exprs_excluded_cols(self):
col = 'excluded_col'
self.kwargs.update(dict(excluded_columns=[col]))
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {}
def test_get_default_exprs_number(self):
col = 'col'
for col_type in ['double', 'int', 'bigint', 'float']:
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'avg'): f'AVG({col})',
(col, 'max'): f'MAX({col})',
(col, 'min'): f'MIN({col})',
(col, 'non_null'): f'COUNT({col})',
(col, 'sum'): f'SUM({col})',
}
def test_get_default_exprs_boolean(self):
col = 'col'
col_type = 'boolean'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'false'): f'SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)',
(col, 'non_null'): f'COUNT({col})',
(col, 'true'): f'SUM(CASE WHEN {col} THEN 1 ELSE 0 END)',
}
def test_get_default_exprs_string(self):
col = 'col'
col_type = 'string'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'approx_distinct'): f'APPROX_DISTINCT({col})',
(col, 'len'): f'SUM(CAST(LENGTH({col}) AS BIGINT))',
(col, 'non_null'): f'COUNT({col})',
}
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
mock_hive_metastore_hook.assert_called_once_with(
metastore_conn_id=hive_stats_collection_operator.metastore_conn_id
)
mock_hive_metastore_hook.return_value.get_table.assert_called_once_with(
table_name=hive_stats_collection_operator.table
)
mock_presto_hook.assert_called_once_with(presto_conn_id=hive_stats_collection_operator.presto_conn_id)
mock_mysql_hook.assert_called_once_with(hive_stats_collection_operator.mysql_conn_id)
mock_json_dumps.assert_called_once_with(hive_stats_collection_operator.partition, sort_keys=True)
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_with_assignment_func(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(col, _):
return {(col, 'test'): f'TEST({col})'}
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.assignment_func(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_with_assignment_func_no_return_value(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(_, __):
pass
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_no_query_results(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
mock_presto_hook.return_value.get_first.return_value = None
with pytest.raises(AirflowException):
HiveStatsCollectionOperator(**self.kwargs).execute(context={})
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_delete_previous_runs_rows(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = True
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
sql = f"""
DELETE FROM hive_stats
WHERE
table_name='{hive_stats_collection_operator.table}' AND
partition_repr='{mock_json_dumps.return_value}' AND
dttm='{hive_stats_collection_operator.dttm}';
"""
mock_mysql_hook.return_value.run.assert_called_once_with(sql)
@unittest.skipIf(
'AIRFLOW_RUNALL_TESTS' not in os.environ, "Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
@patch(
'airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook',
side_effect=MockHiveMetastoreHook,
)
def test_runs_for_hive_stats(self, mock_hive_metastore_hook):
mock_mysql_hook = MockMySqlHook()
mock_presto_hook = MockPrestoHook()
with patch(
'airflow.providers.apache.hive.operators.hive_stats.PrestoHook', return_value=mock_presto_hook
):
with patch(
'airflow.providers.apache.hive.operators.hive_stats.MySqlHook', return_value=mock_mysql_hook
):
op = HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
select_count_query = (
"SELECT COUNT(*) AS __count "
"FROM airflow.static_babynames_partitioned "
"WHERE ds = '2015-01-01';"
)
mock_presto_hook.get_first.assert_called_with(hql=select_count_query)
expected_stats_select_query = (
"SELECT 1 "
"FROM hive_stats "
"WHERE table_name='airflow.static_babynames_partitioned' "
" AND partition_repr='{\"ds\": \"2015-01-01\"}' "
" AND dttm='2015-01-01T00:00:00+00:00' "
"LIMIT 1;"
)
raw_stats_select_query = mock_mysql_hook.get_records.call_args_list[0][0][0]
actual_stats_select_query = re.sub(r'\s{2,}', ' ', raw_stats_select_query).strip()
assert expected_stats_select_query == actual_stats_select_query
insert_rows_val = [
(
'2015-01-01',
'2015-01-01T00:00:00+00:00',
'airflow.static_babynames_partitioned',
'{"ds": "2015-01-01"}',
'',
'count',
['val_0', 'val_1'],
)
]
mock_mysql_hook.insert_rows.assert_called_with(
table='hive_stats',
rows=insert_rows_val,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
|
apache/incubator-airflow
|
tests/providers/apache/hive/operators/test_hive_stats.py
|
Python
|
apache-2.0
| 14,564
|
#!/usr/bin/python3
# Copyright (c) 2016-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from ironicclient import client
from subprocess import check_output
from credential_helper import CredentialHelper
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--power", required=True, default=None,
choices=["on", "off", "reset", "cycle"],
help="Control power state of all overcloud nodes")
args = parser.parse_args()
os_auth_url, os_tenant_name, os_username, os_password, \
os_user_domain_name, os_project_domain_name = \
CredentialHelper.get_undercloud_creds()
kwargs = {'os_username': os_username,
'os_password': os_password,
'os_auth_url': os_auth_url,
'os_tenant_name': os_tenant_name,
'os_user_domain_name': os_user_domain_name,
'os_project_domain_name': os_project_domain_name}
ironic = client.get_client(1, **kwargs)
for node in ironic.node.list(detail=True):
ip, username, password = \
CredentialHelper.get_drac_creds_from_node(node)
cmd = "ipmitool -H {} -I lanplus -U {} -P '{}' chassis power {}". \
format(ip, username, password, args.power)
print(cmd)
os.system(cmd)
if __name__ == "__main__":
main()
|
dsp-jetpack/JetPack
|
src/pilot/control_overcloud.py
|
Python
|
apache-2.0
| 1,930
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Description:
Created on 2016-05-26
@author: weihua
@version:
'''
from __future__ import division
import pandas as pd
import numpy as np
import pandas.io.sql as sql
from datetime import datetime
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
from sklearn import metrics
from datetime import datetime
from pandas.tseries.offsets import Day
import time
from sklearn.externals import joblib
import cPickle as pkl
import glob
import config
def ts2string(x):
return x.strftime("%Y-%m-%d %H:%M:%S")
def string2ts(x):
return datetime.strptime(x,"%Y-%m-%d %H:%M:%S")
def timestring2int(tString):
ts=string2ts(tString)
return int(ts.hour*6+ts.minute/10)+1
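# Editor's note (hedged): timestring2int maps a timestamp string to its
# 10-minute slot of the day (1..144); e.g. "2016-01-01 10:30:00" ->
# int(10*6 + 30/10) + 1 = 64.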
def cvt_dayofweek(x):
return datetime.weekday(x)
def generate_distict_df():
data=pd.read_csv("./data/training_data/cluster_map/cluster_map",sep='\t',names=['district_hash','district_id'],index_col=0)
output = open("./data/preprocessed_data/district_dict.pkl", 'wb')
pkl.dump(data,output,True)
output.close()
return data
def load_disctrict_df(file="district_dict.pkl"):
pkl_file = open("./data/preprocessed_data/%s"%file, 'rb')
data=pkl.load(pkl_file)
pkl_file.close()
return data
def districthash2int(data,districtString):
return data.ix[districtString]['district_id']
def generate_data(column_names = config.order_data_names,aim = "train",table_name = "order_data"):
if aim == "train":
input_path = config.train_input_data_path
elif aim == "test":
input_path = config.test_input_data_path
table_path = input_path + table_name
dirs = glob.glob(table_path+"\\*")
temp_dfs = []
for dir in dirs:
_data = pd.read_csv(dir,sep = "\t",names = column_names)
temp_dfs.append(_data)
data = pd.concat(temp_dfs,axis = 0)
return data
def generate_order_df(aim="train"):
output = open("./data/preprocessed_data/order_df_%s.pkl"%aim, 'wb')
if(aim=="train"):
input_path="training_data"
ts_start=string2ts("2016-01-01 00:00:00")
count=0
temp=[]
while(count<20):
ts_start_day=ts2string(ts_start)[:10]
print("./data/%s/order_data/order_data_%s"%(input_path,ts_start_day))
data=pd.read_csv("./data/%s/order_data/order_data_%s"%(input_path,ts_start_day),sep='\t',
names=['order_id','driver_id','passenger_id','start_dist_hash',
'dest_dist_hash','price','time'])
temp.append(data)
ts_start= ts_start + Day()
count+=1
data=pd.concat(temp,axis=0)
pkl.dump(data,output,True)
elif(aim=="test"):
input_path="test_set_1"
test_set=['2016-01-22','2016-01-24','2016-04-26','2016-01-28','2016-01-30']
temp=[]
for ts_start_day in test_set:
data=pd.read_csv("./data/%s/order_data/order_data_%s"%(input_path,ts_start_day),sep='\t',
names=['order_id','driver_id','passenger_id','start_dist_hash',
'dest_dist_hash','price','time'])
temp.append(data)
data=pd.concat(temp,axis=0)
pkl.dump(data,output,True)
elif(aim=="predict"):
input_path="test_set_1"
data=pd.read_csv("./data/%s/order_data/read_me_1.txt"%(input_path),sep='\t',
names=['origin'])
output.close()
return data
def result_generate(result,comment="temp"):
if(comment=="temp"):
comment=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
else:
comment+=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
result.to_csv("./data/result/result%s.csv"%comment,sep=",",header=False)
def save_pickle(obj,file_name="non-named"):
output = open("./data/preprocessed_data/%s.pkl"%file_name, 'wb')
pkl.dump(obj,output,True)
output.close()
if __name__=="__main__":
pass
|
diditech/dd_tech
|
utils.py
|
Python
|
apache-2.0
| 4,028
|
import sys
import os
import cv2
from keras.models import load_model
sys.path.append("/Users/alexpapiu/Documents/Conv/OpenCV_CNN")
from webcam_cnn_pipeline import return_compiled_model_2, real_time_pred
model_name = sys.argv[1]
w = 1.5*144
h = 2*144
#keep track of all labels:
all_labels = {"model_hand":["A", "B", "C", "D", "No Hand"],
"basic_model":["happy", "sad", "normal", "incredulous"],
"model_face":["happy", "sad", "normal"]}
labelz = dict(enumerate(all_labels[model_name]))
os.chdir("/Users/alexpapiu/Documents/Data/OpenCV_CNN")
model = return_compiled_model_2(input_shape = (3,int(h),int(w)),
num_class = len(labelz))
model.load_weights(model_name)
#open a new video:
cp = cv2.VideoCapture(0)
cp.set(3, w)
cp.set(4, h)
real_time_pred(model, labelz, cp = cp, nframes = 10000)
|
apapiu/live_cnn
|
live_cnn/reusing_model.py
|
Python
|
apache-2.0
| 861
|
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import operator
import nova.scheduler
from nova.scheduler.filters import abstract_filter
class JsonFilter(abstract_filter.AbstractHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = not args[0] in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms"""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def instance_type_to_filter(self, instance_type):
"""Convert instance_type into JSON filter object."""
required_ram = instance_type['memory_mb']
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
['>=', '$compute.disk_available', required_disk]]
return json.dumps(query)
def _parse_string(self, string, host, hostinfo):
"""Strings prefixed with $ are capability lookups in the
form '$service.capability[.subcap*]'.
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
services = dict(compute=hostinfo.compute, network=hostinfo.network,
volume=hostinfo.volume)
service = services.get(path[0], None)
if not service:
return None
for item in path[1:]:
service = service.get(item, None)
if not service:
return None
return service
def _process_filter(self, query, host, hostinfo):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host, hostinfo)
elif isinstance(arg, basestring):
arg = self._parse_string(arg, host, hostinfo)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def filter_hosts(self, host_list, query, options):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
expanded = json.loads(query)
filtered_hosts = []
for host, hostinfo in host_list:
if not hostinfo:
continue
if hostinfo.compute and not hostinfo.compute.get("enabled", True):
# Host is disabled
continue
result = self._process_filter(expanded, host, hostinfo)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
filtered_hosts.append((host, hostinfo))
return filtered_hosts
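# --- Hedged example added by the editor; not part of the original module ---
# A sample query in the JSON grammar accepted by filter_hosts(). The capability
# path '$compute.hypervisor_type' is an illustrative assumption about what a
# hostinfo.compute dict might expose; '$compute.host_memory_free' mirrors the
# capability used in instance_type_to_filter() above.
EXAMPLE_QUERY = json.dumps(
    ['and',
     ['>=', '$compute.host_memory_free', 1024],
     ['in', '$compute.hypervisor_type', 'qemu', 'kvm']])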
|
salv-orlando/MyRepo
|
nova/scheduler/filters/json_filter.py
|
Python
|
apache-2.0
| 5,243
|
from os.path import join, dirname
from setuptools import setup
setup(
name = 'xmppgcm',
packages = ['xmppgcm'], # this must be the same as the name above
version = '0.2.3',
description = 'Client Library for Firebase Cloud Messaging using XMPP',
long_description = open(join(dirname(__file__), 'README.txt')).read(),
install_requires=['sleekxmpp',],
author = 'Winster Jose',
author_email = 'wtjose@gmail.com',
url = 'https://github.com/winster/xmppgcm',
keywords = ['gcm', 'fcm', 'xmpp', 'xmppgcm', 'xmppfcm'], # arbitrary keywords
classifiers = [],
)
|
gamikun/xmppgcm
|
setup.py
|
Python
|
apache-2.0
| 574
|
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Base class for plugins."""
import os
import sys
import platform
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
from mbed_lstools.main import create
from ..host_tests_logger import HtrunLogger
class HostTestPluginBase:
"""Base class for all plugins used with host tests."""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, CopyMethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
required_parameters = (
[]
) # Parameters required for 'kwargs' in plugin APIs: e.g. self.execute()
stable = False # Determine if plugin is stable and can be used
def __init__(self):
"""Initialise the object."""
# Setting Host Test Logger instance
ht_loggers = {
"BasePlugin": HtrunLogger("PLGN"),
"CopyMethod": HtrunLogger("COPY"),
"ResetMethod": HtrunLogger("REST"),
}
self.plugin_logger = ht_loggers.get(self.type, ht_loggers["BasePlugin"])
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
"""Configure plugin.
This function should be called before plugin execute() method is used.
"""
return False
def execute(self, capability, *args, **kwargs):
"""Execute plugin 'capability' by name.
Each capability may directly just call some command line program or execute a
function.
Args:
capability: Capability name.
args: Additional arguments.
kwargs: Additional arguments.
Returns:
Capability call return value.
"""
return False
def is_os_supported(self, os_name=None):
"""Check if the OS is supported by this plugin.
In some cases a plugin will not work under a particular OS. Usually because the
command line tool used to implement the plugin functionality is not available.
Args:
os_name: String describing OS. See self.host_os_support() and
self.host_os_info()
Returns:
True if plugin works under certain OS.
"""
return True
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
"""Print error messages to the console.
Args:
text: Text to print.
"""
self.plugin_logger.prn_err(text)
return False
def print_plugin_info(self, text, NL=True):
"""Print notifications to the console.
Args:
text: Text to print.
NL: (Deprecated) Newline will be added behind text if this flag is True.
"""
self.plugin_logger.prn_inf(text)
return True
def print_plugin_char(self, char):
"""Print a char to stdout."""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(
self,
destination_disk,
init_delay=0.2,
loop_delay=0.25,
target_id=None,
timeout=60,
):
"""Wait until destination_disk is ready and can be accessed.
Args:
destination_disk: Mount point (disk) which will be checked for readiness.
init_delay: Initial delay time before first access check.
loop_delay: Polling delay for access check.
timeout: Polling timeout in seconds.
Returns:
True if mount point was ready in given time, otherwise False.
"""
if target_id:
# Wait for mount point to appear with mbed-ls
# and if it does check if mount point for target_id changed
# If mount point changed, use new mount point and check if its ready.
new_destination_disk = destination_disk
# Sometimes OSes take a long time to mount devices (up to one minute).
            # Current polling time: 120 x 500 ms = 1 minute
self.print_plugin_info(
"Waiting up to %d sec for '%s' mount point (current is '%s')..."
% (timeout, target_id, destination_disk)
)
timeout_step = 0.5
timeout = int(timeout / timeout_step)
for i in range(timeout):
# mbed_lstools.main.create() should be done inside the loop.
# Otherwise it will loop on same data.
mbeds = create()
mbed_list = mbeds.list_mbeds() # list of mbeds present
# get first item in list with a matching target_id, if present
mbed_target = next(
(x for x in mbed_list if x["target_id"] == target_id), None
)
if mbed_target is not None:
# Only assign if mount point is present and known (not None)
if (
"mount_point" in mbed_target
and mbed_target["mount_point"] is not None
):
new_destination_disk = mbed_target["mount_point"]
break
sleep(timeout_step)
if new_destination_disk != destination_disk:
# Mount point changed, update to new mount point from mbed-ls
self.print_plugin_info(
"Mount point for '%s' changed from '%s' to '%s'..."
% (target_id, destination_disk, new_destination_disk)
)
destination_disk = new_destination_disk
result = True
# Check if mount point we've promoted to be valid one (by optional target_id
# check above)
# Let's wait for 30 * loop_delay + init_delay max
if not access(destination_disk, F_OK):
self.print_plugin_info(
"Waiting for mount point '%s' to be ready..." % destination_disk,
NL=False,
)
sleep(init_delay)
for i in range(30):
if access(destination_disk, F_OK):
result = True
break
sleep(loop_delay)
self.print_plugin_char(".")
else:
self.print_plugin_error(
"mount {} is not accessible ...".format(destination_disk)
)
result = False
return (result, destination_disk)
def check_serial_port_ready(self, serial_port, target_id=None, timeout=60):
"""Check and update serial port name information for DUT.
If no target_id is specified return the old serial port name.
Args:
serial_port: Current serial port name.
target_id: Target ID of a device under test.
            timeout: Serial port polling timeout in seconds.
Returns:
Tuple with result (always True) and serial port read from mbed-ls.
"""
# If serial port changed (check using mbed-ls), use new serial port
new_serial_port = None
if target_id:
# Sometimes OSes take a long time to mount devices (up to one minute).
            # Current polling time: 120 x 500 ms = 1 minute
self.print_plugin_info(
"Waiting up to %d sec for '%s' serial port (current is '%s')..."
% (timeout, target_id, serial_port)
)
timeout_step = 0.5
timeout = int(timeout / timeout_step)
for i in range(timeout):
# mbed_lstools.main.create() should be done inside the loop. Otherwise
# it will loop on same data.
mbeds = create()
mbed_list = mbeds.list_mbeds() # list of mbeds present
# get first item in list with a matching target_id, if present
mbed_target = next(
(x for x in mbed_list if x["target_id"] == target_id), None
)
if mbed_target is not None:
# Only assign if serial port is present and known (not None)
if (
"serial_port" in mbed_target
and mbed_target["serial_port"] is not None
):
new_serial_port = mbed_target["serial_port"]
if new_serial_port != serial_port:
# Serial port changed, update to new serial port
self.print_plugin_info(
"Serial port for tid='%s' changed from '%s' to '%s'..."
% (target_id, serial_port, new_serial_port)
)
break
sleep(timeout_step)
else:
new_serial_port = serial_port
return new_serial_port
def check_parameters(self, capability, *args, **kwargs):
"""Check if required parameters are missing.
This function should be called each time we call execute().
Args:
capability: Capability name.
args: Additional parameters.
kwargs: Additional parameters.
Returns:
True if all required parameters are passed to plugin, otherwise False.
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters):
self.print_plugin_error(
"execute parameter(s) '%s' missing!" % (", ".join(missing_parameters))
)
return False
return True
def run_command(self, cmd, shell=True, stdin=None):
"""Run a shell command as a subprocess.
Prints 'cmd' return code if execution failed.
Args:
cmd: Command to execute.
shell: True if shell command should be executed (eg. ls, ps).
stdin: A custom stdin for the process running the command (defaults
to None).
Returns:
True if command successfully executed, otherwise False.
"""
result = True
try:
ret = call(cmd, shell=shell, stdin=stdin)
if ret:
self.print_plugin_error("[ret=%d] Command: %s" % (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s" % (int(ret), cmd))
self.print_plugin_error(str(e))
return result
def host_os_info(self):
"""Return information about host OS.
Returns:
Tuple with information about OS and host platform.
"""
result = (
os.name,
platform.system(),
platform.release(),
platform.version(),
sys.platform,
)
return result
def host_os_support(self):
"""Determine host OS.
This function should be ported for new OS support.
Returns:
None if host OS is unknown, else string with name.
"""
result = None
os_info = self.host_os_info()
if os_info[0] == "nt" and os_info[1] == "Windows":
result = "Windows7"
elif (
os_info[0] == "posix" and os_info[1] == "Linux" and ("Ubuntu" in os_info[3])
):
result = "Ubuntu"
elif os_info[0] == "posix" and os_info[1] == "Linux":
result = "LinuxGeneric"
elif os_info[0] == "posix" and os_info[1] == "Darwin":
result = "Darwin"
return result
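# --- Hedged example added by the editor; not part of the original module ---
# A minimal sketch of how a concrete plugin might subclass HostTestPluginBase.
# The capability name 'custom_copy' and the parameters below are illustrative
# assumptions, not an API documented by this file.
class ExampleCopyPlugin(HostTestPluginBase):
    """Toy copy plugin demonstrating the plugin interface."""

    name = "ExampleCopyPlugin"
    type = "CopyMethod"
    capabilities = ["custom_copy"]
    required_parameters = ["image_path", "destination_disk"]
    stable = False

    def setup(self, *args, **kwargs):
        # Nothing to configure in this sketch.
        return True

    def execute(self, capability, *args, **kwargs):
        # Validate declared parameters before doing any work.
        if not self.check_parameters(capability, *args, **kwargs):
            return False
        if capability == "custom_copy":
            cmd = "cp %s %s" % (kwargs["image_path"], kwargs["destination_disk"])
            return self.run_command(cmd)
        return False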
|
ARMmbed/greentea
|
src/htrun/host_tests_plugins/host_test_plugins.py
|
Python
|
apache-2.0
| 12,644
|
def italianhello():
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(105,78)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,144,60,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",0,0,0,50,82,180)
ear.pauseListening()
sleep(1)
for w in range(0,3):
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.60, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
if w==1:
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.65, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,70)
i01.mouth.speakBlocking("ciao , il mio nome e inmoov one")
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",57,145,50,68)
i01.moveHand("left",100,90,85,80,71,15)
i01.moveHand("right",3,0,31,12,26,45)
sleep(1)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(79,100)
i01.moveArm("left",5,94,28,15)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",42,58,42,55,71,35)
i01.moveHand("right",81,50,82,60,105,113)
ear.resumeListening()
|
MyRobotLab/pyrobotlab
|
home/kwatters/harry/gestures/italianhello.py
|
Python
|
apache-2.0
| 2,293
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-visualizations.py
"""
The same MNIST ConvNet example, but with weights/activations visualization.
"""
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
IMAGE_SIZE = 28
def visualize_conv_weights(filters, name):
"""Visualize use weights in convolution filters.
Args:
filters: tensor containing the weights [H,W,Cin,Cout]
name: label for tensorboard
Returns:
        image of all weights
"""
with tf.name_scope('visualize_w_' + name):
filters = tf.transpose(filters, (3, 2, 0, 1)) # [h, w, cin, cout] -> [cout, cin, h, w]
filters = tf.unstack(filters) # --> cout * [cin, h, w]
filters = tf.concat(filters, 1) # --> [cin, cout * h, w]
filters = tf.unstack(filters) # --> cin * [cout * h, w]
filters = tf.concat(filters, 1) # --> [cout * h, cin * w]
filters = tf.expand_dims(filters, 0)
filters = tf.expand_dims(filters, -1)
tf.summary.image('visualize_w_' + name, filters)
def visualize_conv_activations(activation, name):
"""Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations
"""
import math
with tf.name_scope('visualize_act_' + name):
_, h, w, c = activation.get_shape().as_list()
rows = []
c_per_row = int(math.sqrt(c))
for y in range(0, c - c_per_row, c_per_row):
row = activation[:, :, :, y:y + c_per_row] # [?, H, W, 32] --> [?, H, W, 5]
cols = tf.unstack(row, axis=3) # [?, H, W, 5] --> 5 * [?, H, W]
row = tf.concat(cols, 1)
rows.append(row)
viz = tf.concat(rows, 2)
tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))
class Model(ModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
tf.placeholder(tf.int32, (None,), 'label')]
def build_graph(self, image, label):
image = tf.expand_dims(image * 2 - 1, 3)
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
c0 = Conv2D('conv0', image)
p0 = MaxPooling('pool0', c0, 2)
c1 = Conv2D('conv1', p0)
c2 = Conv2D('conv2', c1)
p1 = MaxPooling('pool1', c2, 2)
c3 = Conv2D('conv3', p1)
fc1 = FullyConnected('fc0', c3, 512, nl=tf.nn.relu)
fc1 = Dropout('dropout', fc1, 0.5)
logits = FullyConnected('fc1', fc1, out_dim=10, nl=tf.identity)
with tf.name_scope('visualizations'):
visualize_conv_weights(c0.variables.W, 'conv0')
visualize_conv_activations(c0, 'conv0')
visualize_conv_weights(c1.variables.W, 'conv1')
visualize_conv_activations(c1, 'conv1')
visualize_conv_weights(c2.variables.W, 'conv2')
visualize_conv_activations(c2, 'conv2')
visualize_conv_weights(c3.variables.W, 'conv3')
visualize_conv_activations(c3, 'conv3')
tf.summary.image('input', (image + 1.0) * 128., 3)
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(logits, label, 1)), name='accuracy')
wd_cost = tf.multiply(1e-5,
regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss')
return tf.add_n([wd_cost, cost], name='total_cost')
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
config = TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
ModelSaver(),
InferenceRunner(
dataset_test, ScalarStats(['cross_entropy_loss', 'accuracy'])),
],
steps_per_epoch=len(dataset_train),
max_epoch=100,
)
launch_train_with_config(config, SimpleTrainer())
|
eyaler/tensorpack
|
examples/basics/mnist-visualizations.py
|
Python
|
apache-2.0
| 4,834
|
# -*- coding: utf-8 -*-
"""
Misc stuff also needed for core imports and monkey patching
"""
import numpy as np
from .core import (RVector3, R3Vector, RMatrix)
def isScalar(v, val=None):
"""Check if v is scalar, i.e. int, float or complex.
Optional compare with val.
Examples
--------
>>> import pygimli as pg
>>> print(pg.isScalar(0))
True
>>> print(pg.isScalar(1.0))
True
>>> print(pg.isScalar(1.0, 0.0))
False
>>> print(pg.isScalar(1.0, 1.0))
True
>>> print(pg.isScalar(1+1j))
True
>>> print(pg.isScalar([0.0, 1.0]))
False
"""
if val is None:
return isinstance(v, (int, float, complex, np.complex128))
# maybe add some tolerance check
return isinstance(v, (int, float, complex, np.complex128)) and v == val
def isArray(v, N=None):
"""Check if `v` is a 1D array or a vector, with optional size `N`.
Examples
--------
>>> import pygimli as pg
>>> import numpy as np
>>> print(pg.isArray([0, 1]))
True
>>> print(pg.isArray(np.ones(5)))
True
>>> print(pg.isArray(pg.Vector(5)))
True
>>> print(pg.isArray(pg.Vector(5), N=5))
True
>>> print(pg.isArray(pg.Vector(5), N=2))
False
>>> print(pg.isArray('foo'))
False
"""
if N is None:
if isinstance(v, (tuple, list)):
return isScalar(v[0])
return (hasattr(v, '__iter__') and \
not isinstance(v, (str))) and v.ndim == 1
return isArray(v) and len(v) == N
def isComplex(vals):
"""Check numpy or pg.Vector if have complex data type"""
if isScalar(vals):
if isinstance(vals, (np.complex128, complex)):
return True
elif isArray(vals):
return isComplex(vals[0])
return False
def isPos(v):
"""Check if v is an array of size(3), [x,y,z], or pg.Pos.
Examples
--------
>>> import pygimli as pg
>>> print(pg.isPos([0.0, 0.0, 1.]))
True
>>> print(pg.isPos(pg.Pos(0.0, 0.0, 0.0)))
True
>>> print(pg.isPos(np.ones(3)))
True
>>> print(pg.isPos(np.ones(4)))
False
"""
return isArray(v, 2) or isArray(v, 3) or isinstance(v, RVector3)
def isR3Array(v, N=None):
"""Check if v is an array of size(N,3), a R3Vector or a list of pg.Pos.
Examples
--------
>>> import pygimli as pg
>>> print(pg.isR3Array([[0.0, 0.0, 1.], [1.0, 0.0, 1.]]))
True
>>> print(pg.isR3Array(np.ones((33, 3)), N=33))
True
>>> print(pg.isR3Array(pg.meshtools.createGrid(2,2).positions()))
True
"""
if N is None:
return isinstance(v, R3Vector) or \
( isinstance(v, list) and isPos(v[0])) or \
(not isinstance(v, list) and hasattr(v, '__iter__') and \
not isinstance(v, (str)) and v.ndim == 2 and isPos(v[0]))
return isR3Array(v) and len(v) == N
isPosList = isR3Array
def isMatrix(v, shape=None):
"""Check is v has ndim=2 or is comparable list"""
if shape is None:
return isinstance(v, RMatrix) or \
hasattr(v, 'ndim') and v.ndim == 2 or \
isinstance(v, list) and isArray(v[0])
return isMatrix(v) and (hasattr(v, 'shape') and v.shape == shape)
|
gimli-org/gimli
|
pygimli/core/base.py
|
Python
|
apache-2.0
| 3,243
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import subnet_service_type_db_models
from neutron.extensions import subnet_service_types
from neutron.tests.unit.db import test_db_base_plugin_v2
class SubnetServiceTypesExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
extension = subnet_service_types.Subnet_service_types()
return extension.get_extended_resources(version)
class SubnetServiceTypesExtensionTestPlugin(
db_base_plugin_v2.NeutronDbPluginV2,
subnet_service_type_db_models.SubnetServiceTypeMixin):
"""Test plugin to mixin the subnet service_types extension.
"""
supported_extension_aliases = ["subnet-service-types"]
class SubnetServiceTypesExtensionTestCase(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
"""Test API extension subnet_service_types attributes.
"""
CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8']
IP_VERSION = 4
def setUp(self):
plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' +
'SubnetServiceTypesExtensionTestPlugin')
ext_mgr = SubnetServiceTypesExtensionManager()
super(SubnetServiceTypesExtensionTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _create_service_subnet(self, service_types=None, cidr=None,
network=None, enable_dhcp=False):
if not network:
with self.network() as network:
pass
network = network['network']
if not cidr:
cidr = self.CIDRS[0]
args = {'net_id': network['id'],
'tenant_id': network['tenant_id'],
'cidr': cidr,
'ip_version': self.IP_VERSION,
'enable_dhcp': enable_dhcp}
if service_types:
args['service_types'] = service_types
return self._create_subnet(self.fmt, **args)
def _test_create_subnet(self, service_types, expect_fail=False):
res = self._create_service_subnet(service_types)
if expect_fail:
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
else:
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_create_subnet_blank_type(self):
self._test_create_subnet([])
def test_create_subnet_bar_type(self):
self._test_create_subnet(['network:bar'])
def test_create_subnet_foo_type(self):
self._test_create_subnet(['compute:foo'])
def test_create_subnet_bar_and_foo_type(self):
self._test_create_subnet(['network:bar', 'compute:foo'])
def test_create_subnet_invalid_type(self):
self._test_create_subnet(['foo'], expect_fail=True)
self._test_create_subnet([1], expect_fail=True)
def test_create_subnet_no_type(self):
res = self._create_service_subnet()
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertFalse(subnet['service_types'])
def _test_update_subnet(self, subnet, service_types, fail_code=None):
data = {'subnet': {'service_types': service_types}}
req = self.new_update_request('subnets', data, subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
if fail_code is not None:
self.assertEqual(fail_code,
res['NeutronError']['type'])
else:
subnet = res['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_update_subnet_zero_to_one(self):
service_types = ['network:foo']
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with a single service type
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_two(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with two service types
service_types.append('compute:bar')
self._test_update_subnet(subnet, service_types)
def test_update_subnet_two_to_one(self):
service_types = ['network:foo', 'compute:bar']
# Create a subnet with two service types
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with one service type
service_types = ['network:foo']
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_zero(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with zero service types
service_types = []
self._test_update_subnet(subnet, service_types)
def test_update_subnet_invalid_type(self):
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with invalid service type(s)
self._test_update_subnet(subnet, ['foo'],
fail_code='InvalidSubnetServiceType')
self._test_update_subnet(subnet, [2],
fail_code='InvalidInputSubnetServiceType')
def _assert_port_res(self, port, service_type, subnet, fallback,
error='IpAddressGenerationFailureNoMatchingSubnet'):
res = self.deserialize('json', port)
if fallback:
port = res['port']
self.assertEqual(1, len(port['fixed_ips']))
self.assertEqual(service_type, port['device_owner'])
self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
else:
self.assertEqual(error, res['NeutronError']['type'])
def test_create_port_with_matching_service_type(self):
with self.network() as network:
pass
matching_type = 'network:foo'
non_matching_type = 'network:bar'
# Create a subnet with no service types
self._create_service_subnet(network=network)
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[2],
network=network)
# Create a subnet with a service type to match the port device owner
res = self._create_service_subnet([matching_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Create a port with device owner matching the correct service subnet
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=matching_type)
self._assert_port_res(port, matching_type, service_subnet, True)
def test_create_port_without_matching_service_type(self, fallback=True):
with self.network() as network:
pass
subnet = ''
matching_type = 'compute:foo'
non_matching_type = 'network:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[1],
network=network)
# Create a port with device owner not matching the service subnet
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=matching_type)
self._assert_port_res(port, matching_type, subnet, fallback)
def test_create_port_without_matching_service_type_no_fallback(self):
self.test_create_port_without_matching_service_type(fallback=False)
def test_create_port_no_device_owner(self, fallback=True):
with self.network() as network:
pass
subnet = ''
service_type = 'compute:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a service_type
self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
# Create a port without a device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'])
self._assert_port_res(port, '', subnet, fallback)
def test_create_port_no_device_owner_no_fallback(self):
self.test_create_port_no_device_owner(fallback=False)
def test_create_port_exhausted_subnet(self, fallback=True):
with self.network() as network:
pass
subnet = ''
service_type = 'compute:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a service_type
res = self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Update the service subnet with empty allocation pools
data = {'subnet': {'allocation_pools': []}}
req = self.new_update_request('subnets', data, service_subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
# Create a port with a matching device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=service_type)
self._assert_port_res(port, service_type, subnet, fallback,
error='IpAddressGenerationFailure')
def test_create_port_exhausted_subnet_no_fallback(self):
self.test_create_port_exhausted_subnet(fallback=False)
def test_create_dhcp_port_compute_subnet(self, enable_dhcp=True):
with self.network() as network:
pass
res = self._create_service_subnet(['compute:nova'],
network=network,
enable_dhcp=enable_dhcp)
subnet = self.deserialize('json', res)['subnet']
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
fixed_ips=[{'subnet_id': subnet['id']}],
device_owner='network:dhcp')
self._assert_port_res(port, 'network:dhcp', subnet, enable_dhcp)
def test_create_dhcp_port_compute_subnet_no_dhcp(self):
self.test_create_dhcp_port_compute_subnet(enable_dhcp=False)
def test_update_port_fixed_ips(self):
with self.network() as network:
pass
service_type = 'compute:foo'
# Create a subnet with a service_type
res = self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Create a port with a matching device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=service_type)
port = self.deserialize('json', port)['port']
# Update the port's fixed_ips. It's ok to reuse the same IP it already
# has.
ip_address = port['fixed_ips'][0]['ip_address']
data = {'port': {'fixed_ips': [{'subnet_id': service_subnet['id'],
'ip_address': ip_address}]}}
# self._update will fail with a MismatchError if the update cannot be
# applied
port = self._update('ports', port['id'], data)
class SubnetServiceTypesExtensionTestCasev6(
SubnetServiceTypesExtensionTestCase):
CIDRS = ['2001:db8:2::/64', '2001:db8:3::/64', '2001:db8:4::/64']
IP_VERSION = 6
|
eayunstack/neutron
|
neutron/tests/unit/extensions/test_subnet_service_types.py
|
Python
|
apache-2.0
| 14,519
|
"""
Module with location helpers.
detect_location_info and elevation are mocked by default during tests.
"""
import asyncio
import collections
import math
from typing import Any, Dict, Optional, Tuple
import aiohttp
ELEVATION_URL = "https://api.open-elevation.com/api/v1/lookup"
IP_API = "http://ip-api.com/json"
IPAPI = "https://ipapi.co/json/"
# Constants from https://github.com/maurycyp/vincenty
# Earth ellipsoid according to WGS 84
# Axis a of the ellipsoid (Radius of the earth in meters)
AXIS_A = 6378137
# Flattening f = (a-b) / a
FLATTENING = 1 / 298.257223563
# Axis b of the ellipsoid in meters.
AXIS_B = 6356752.314245
MILES_PER_KILOMETER = 0.621371
MAX_ITERATIONS = 200
CONVERGENCE_THRESHOLD = 1e-12
LocationInfo = collections.namedtuple(
"LocationInfo",
[
"ip",
"country_code",
"country_name",
"region_code",
"region_name",
"city",
"zip_code",
"time_zone",
"latitude",
"longitude",
"use_metric",
],
)
async def async_detect_location_info(
session: aiohttp.ClientSession,
) -> Optional[LocationInfo]:
"""Detect location information."""
data = await _get_ipapi(session)
if data is None:
data = await _get_ip_api(session)
if data is None:
return None
data["use_metric"] = data["country_code"] not in ("US", "MM", "LR")
return LocationInfo(**data)
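# Example (illustrative sketch, not part of the original module): the detector
# is typically awaited with a shared aiohttp session, e.g.
#
#     async with aiohttp.ClientSession() as session:
#         info = await async_detect_location_info(session)
#         if info is not None:
#             print(info.country_code, info.latitude, info.longitude)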
def distance(
lat1: Optional[float], lon1: Optional[float], lat2: float, lon2: float
) -> Optional[float]:
"""Calculate the distance in meters between two points.
Async friendly.
"""
if lat1 is None or lon1 is None:
return None
result = vincenty((lat1, lon1), (lat2, lon2))
if result is None:
return None
return result * 1000
# Author: https://github.com/maurycyp
# Source: https://github.com/maurycyp/vincenty
# License: https://github.com/maurycyp/vincenty/blob/master/LICENSE
# pylint: disable=invalid-name
def vincenty(
point1: Tuple[float, float], point2: Tuple[float, float], miles: bool = False
) -> Optional[float]:
"""
Vincenty formula (inverse method) to calculate the distance.
Result in kilometers or miles between two points on the surface of a
spheroid.
Async friendly.
"""
# short-circuit coincident points
if point1[0] == point2[0] and point1[1] == point2[1]:
return 0.0
U1 = math.atan((1 - FLATTENING) * math.tan(math.radians(point1[0])))
U2 = math.atan((1 - FLATTENING) * math.tan(math.radians(point2[0])))
L = math.radians(point2[1] - point1[1])
Lambda = L
sinU1 = math.sin(U1)
cosU1 = math.cos(U1)
sinU2 = math.sin(U2)
cosU2 = math.cos(U2)
for _ in range(MAX_ITERATIONS):
sinLambda = math.sin(Lambda)
cosLambda = math.cos(Lambda)
sinSigma = math.sqrt(
(cosU2 * sinLambda) ** 2 + (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2
)
if sinSigma == 0.0:
return 0.0 # coincident points
cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda
sigma = math.atan2(sinSigma, cosSigma)
sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
cosSqAlpha = 1 - sinAlpha ** 2
try:
cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha
except ZeroDivisionError:
cos2SigmaM = 0
C = FLATTENING / 16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha))
LambdaPrev = Lambda
Lambda = L + (1 - C) * FLATTENING * sinAlpha * (
sigma
+ C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM ** 2))
)
if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD:
break # successful convergence
else:
return None # failure to converge
uSq = cosSqAlpha * (AXIS_A ** 2 - AXIS_B ** 2) / (AXIS_B ** 2)
A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
deltaSigma = (
B
* sinSigma
* (
cos2SigmaM
+ B
/ 4
* (
cosSigma * (-1 + 2 * cos2SigmaM ** 2)
- B
/ 6
* cos2SigmaM
* (-3 + 4 * sinSigma ** 2)
* (-3 + 4 * cos2SigmaM ** 2)
)
)
)
s = AXIS_B * A * (sigma - deltaSigma)
s /= 1000 # Conversion of meters to kilometers
if miles:
s *= MILES_PER_KILOMETER # kilometers to miles
return round(s, 6)
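# Example (illustrative, values approximate): the classic check from the
# upstream vincenty library, Boston -> New York:
#
#     boston = (42.3541165, -71.0693514)
#     new_york = (40.7791472, -73.9680804)
#     vincenty(boston, new_york)              # ~298.4 (kilometers)
#     vincenty(boston, new_york, miles=True)  # ~185.4 (miles)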
async def _get_ipapi(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ipapi.co for location data."""
try:
resp = await session.get(IPAPI, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("ip"),
"country_code": raw_info.get("country"),
"country_name": raw_info.get("country_name"),
"region_code": raw_info.get("region_code"),
"region_name": raw_info.get("region"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("postal"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("latitude"),
"longitude": raw_info.get("longitude"),
}
async def _get_ip_api(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ip-api.com for location data."""
try:
resp = await session.get(IP_API, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("query"),
"country_code": raw_info.get("countryCode"),
"country_name": raw_info.get("country"),
"region_code": raw_info.get("region"),
"region_name": raw_info.get("regionName"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("zip"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("lat"),
"longitude": raw_info.get("lon"),
}
|
leppa/home-assistant
|
homeassistant/util/location.py
|
Python
|
apache-2.0
| 6,274
|
import logging
from google.appengine.ext import ndb
from endpoints_proto_datastore.ndb.model import EndpointsModel, EndpointsAliasProperty
class UserModel(EndpointsModel):
email = ndb.StringProperty()
name = ndb.StringProperty()
|
LookThisCode/DeveloperBus
|
Season 2013/Bogota/Projects/06.Agile_Business/backend/app/models/user.py
|
Python
|
apache-2.0
| 231
|
#!/usr/bin/env python
import telnetlib
import subprocess
import signal
import time
###############################################################
# This script will automatically flash and start a GDB debug
# session to the STM32 discovery board using OpenOCD. It is
# meant to be called from the rake task "debug" (execute
# rake debug) and the working directory is assumed to be the
# project root
###############################################################
###############################################################
# We need to be able to send a SIGTERM (ctrl-c) to GDB
# without killing openocd or this script. Set up a custom
# signal handler here that essentially ignores SIGTERM
###############################################################
def signal_handler(signal, frame):
pass # do nothing
###############################################################
# Start up the openocd thread
###############################################################
# We need gdb to respond to a SIGINT (ctrl-c), but by default,
# that will cause every other child process to die, including
# openocd. Disable sigint, then re-enable it after the child
# spawns. The child inherits the current state of signal
# handlers.
signal.signal(signal.SIGINT, signal.SIG_IGN)
openocd = subprocess.Popen(["openocd"])
time.sleep(2) # Wait for this to start up
# Set up a custom signal handler so that SIGINT doesn't kill
# this script
signal.signal(signal.SIGINT, signal_handler)
###############################################################
# Flash the new image to the development board
###############################################################
# Create the flashable image
subprocess.call(["arm-none-eabi-objcopy", "-Obinary", "build/flash.elf", "build/flash.bin"])
# Flash the image
tn = telnetlib.Telnet("127.0.0.1", "4444")
tn.read_until("> ")
tn.write("poll\n")
tn.read_until("> ")
tn.write("reset halt\n")
tn.read_until("> ")
tn.write("flash probe 0\n")
tn.read_until("> ")
tn.write("flash write_image erase build/flash.bin 0x08000000\n")
tn.read_until("> ")
tn.write("reset\n")
tn.read_until("> ")
tn.write("exit\n")
tn.close()
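# Aside (untested alternative, for illustration only): newer OpenOCD releases
# can flash in a single command instead of the telnet sequence above, e.g.
#   openocd -c "program build/flash.bin 0x08000000 verify reset exit"
# This script keeps the explicit telnet steps so each stage can be observed.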
###############################################################
# Start the gdb session
###############################################################
time.sleep(2)
gdb_proc = subprocess.Popen(["arm-none-eabi-gdb", "-ex", "target remote localhost:3333", "build/flash.elf", "-ex", "set remote hardware-breakpoint-limit 6", "-ex", "set remote hardware-watchpoint-limit 4"])
# Spin until GDB is exited
while gdb_proc.poll() is None:
time.sleep(1)
# Gracefully exit openocd
openocd.terminate()
|
timbrom/lightshow
|
scripts/flash_and_debug.py
|
Python
|
apache-2.0
| 2,657
|
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python Data Types used for the REST objects """
import json
ETHERNET = ['ipv4', 'arp', 'rarp', 'snmp', 'ipv6',
'mpls_u', 'mpls_m', 'lldp', 'pbb', 'bddp']
VERSION = ['1.0.0', '1.1.0', '1.2.0', '1.3.0']
ACTIONS = ['output',
'set_vlan_vid',
'set_vlan_pcp',
'strip_vlan',
'set_dl_src',
'set_dl_dst',
'set_nw_src',
'set_nw_dst',
'set_nw_tos',
'set_tp_src',
'set_tp_dst',
'enqueue']
CAPABILITIES = ['flow_stats',
'table_stats',
'port_stats',
'stp',
'group_stats',
'reserved',
'ip_reasm',
'queue_stats',
'arp_match_ip',
'port_blocked'
]
PORT_CONFIG = ["port_down",
"no_stp",
"no_recv",
"ro_recv_stp",
"no_flood",
"no_fwd",
"no_packet_in"
]
PORT_STATE = ["link_down",
"blocked",
"live",
"stp_listen",
"stp_learn",
"stp_forward",
"stp_block"
]
PORT_FEATURES = ["rate_10mb_hd",
"rate_10mb_fd",
"rate_100mb_hd",
"rate_100mb_fd",
"rate_1gb_hd",
"rate_1gb_fd",
"rate_10gb_fd",
"rate_40gb_fd",
"rate_100gb_fd",
"rate_1tb_fd",
"rate_other",
"copper",
"fiber",
"autoneg",
"pause",
"pause_asym"
]
FLOW_MOD_CMD = ["add",
"modify",
"modify_strict",
"delete",
"delete_strict"
]
FLOW_MOD_FLAGS = ["send_flow_rem",
"check_overlap",
"emerg",
"reset_counts",
"no_packet_counts",
"no_byte_counts"]
IP_PROTOCOL = ["tcp",
"udp",
"sctp",
"icmp",
"ipv6-icmp"
]
ICMP_V6_TYPE = ["nbr_sol", "nbr_adv"]
MATCH_MODE = ["none", "present", "exact"]
IPV6_EXTHDR = ["no_next",
"esp",
"auth",
"dest",
"frag",
"router",
"hop",
"un_rep",
"un_seq"]
METER_FLAGS = ["kbps",
"pktps",
"burst",
"stats"]
METER_TYPE = ["drop", "dscp_remark", "experimenter"]
GROUP_TYPE = ["all", "select", "indirect", "ff"]
COMMANDS = ["add", "modify", "delete"]
LINK_STATE = ["link_down",
"blocked",
"live",
"stp_listen",
"stp_learn",
"stp_forward",
"stp_block"
]
OPERATION = ["ADD", "CHANGE", "DELETE", "MOVE"]
ENUMS = [ETHERNET,
VERSION,
ACTIONS,
CAPABILITIES,
PORT_CONFIG,
PORT_STATE,
PORT_FEATURES,
FLOW_MOD_CMD,
FLOW_MOD_FLAGS,
IP_PROTOCOL,
ICMP_V6_TYPE,
MATCH_MODE,
ICMP_V6_TYPE,
MATCH_MODE,
IPV6_EXTHDR,
METER_FLAGS,
METER_TYPE,
GROUP_TYPE,
COMMANDS,
LINK_STATE,
OPERATION
]
METHODS = ["factory", "to_json_string", "to_dict"]
KEYWORDS = ["self"]
JSON_MAP = {'datapath': 'Datapath',
'meter_features': 'MeterFeatures',
'group_features': 'GroupFeatures',
'port': 'Port',
'meter': 'Meter',
'flow': 'Flow',
'group': 'Group',
'cluster': 'Cluster',
'packet': 'Packet',
'path': 'Path',
'app': 'App',
'license': 'License',
'support_report': None,
'observation': 'Observation',
'nexthop': 'NextHop'
}
PLURALS = {'datapaths': JSON_MAP['datapath'],
'controller_stats': 'ControllerStats',
'stats': 'Stats',
'ports': JSON_MAP['port'],
'meters': JSON_MAP['meter'],
'flows': JSON_MAP['flow'],
'groups': JSON_MAP['group'],
'clusters': JSON_MAP['cluster'],
'links': 'Link',
'nodes': 'Node',
'arps': 'Arp',
'lldp_suppressed': 'LldpProperties',
'observations': JSON_MAP['observation'],
'packets': JSON_MAP['packet'],
'apps': JSON_MAP['app'],
'licenses': JSON_MAP['license'],
'paths': JSON_MAP['path'],
'nexthops': JSON_MAP['nexthop']
}
CLASS_MAP = {'ControllerStats': {'lost': 'Counter',
'packet_in': 'Counter',
'packet_out': 'Counter'},
'Team': {'systems': 'TeamSystem'},
'Flow': {'match': 'Match',
'actions': 'Action',
'instructions': 'Instruction'},
'Stats': {'port_stats': 'PortStats',
'group_stats': 'GroupStats',
'meter_stats': 'MeterStats'},
'Packet': {'eth': 'Ethernet',
'ip': 'Ip',
'ipv6': 'Ipv6',
'udp': 'Udp',
'tcp': 'Tcp',
'dhcp': 'Dhcp',
'icmp': 'Icmp',
'icmpv6': 'Icmpv6'}
}
class JsonObjectFactory(object):
factories = {}
@staticmethod
def add_factory(id, factory):
JsonObjectFactory.factories[id] = factory
@staticmethod
def create(id, data):
for key in data:
if key in KEYWORDS:
new_key = key + "_"
data[new_key] = data.pop(key)
if id not in JsonObjectFactory.factories:
JsonObjectFactory.add_factory(id, eval(id))
return JsonObjectFactory.factories[id].factory(data)
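# Example (illustrative sketch): the factory looks up (or lazily registers via
# eval) the class named by `id` and builds it from a decoded JSON dict, e.g.
#
#     data = {'dpid': '00:00:00:00:00:00:00:01', 'ready': True}
#     dp = JsonObjectFactory.create('Datapath', data)
#     dp.dpid  # -> '00:00:00:00:00:00:00:01'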
class JsonObject(object):
""" This is the base class for all HP SDN Client data types."""
def __str__(self):
return self.to_json_string()
def to_json_string(self):
tmp = self.to_dict()
return json.dumps(tmp, sort_keys=True,
indent=4, separators=(',', ': '))
def to_dict(self):
data = {}
attributes = [attr for attr in dir(self)
if not callable(getattr(self, attr))
and not attr.startswith("__")]
for attr in attributes:
if getattr(self, attr) is not None:
value = getattr(self, attr)
if isinstance(value, list):
tmp = []
for list_item in value:
if isinstance(list_item, JsonObject):
tmp.append(list_item.to_dict())
else:
tmp.append(list_item)
data[attr.__str__()] = tmp
elif isinstance(value, JsonObject):
data[attr.__str__()] = value.to_dict()
                else:
                    data[attr.__str__()] = value
return data
@classmethod
def factory(cls, data):
try:
cm = CLASS_MAP[cls.__name__]
for key in data:
if key in cm and isinstance(data[key], list):
l = []
for d in data[key]:
l.append(JsonObjectFactory.create(cm[key], d))
data[key] = l
elif key in cm:
data[key] = JsonObjectFactory.create(cm[key], data[key])
except KeyError:
pass
return cls(**data)
def __eq__(self, other):
attributes = [attr for attr in dir(self)
if not callable(getattr(self, attr))
and not attr.startswith("__")]
for attr in attributes:
try:
if self.__getattribute__(attr) == other.__getattribute__(attr):
continue
else:
return False
except AttributeError:
return False
else:
return True
# OpenFlow #
class Datapath(JsonObject):
""" Datapath (JsonObject)
A python representation of the Datapath object
"""
def __init__(self, **kwargs):
self.dpid = kwargs.get('dpid', None)
self.negotiated_version = kwargs.get('negotiated_version', None)
self.ready = kwargs.get('ready', None)
self.last_message = kwargs.get('last_message', None)
self.num_buffers = kwargs.get('num_buffers', None)
self.num_tables = kwargs.get('num_tables', None)
self.capabilities = kwargs.get('capabilities', [])
self.device_ip = kwargs.get('device_ip', None)
self.device_port = kwargs.get('device_port', None)
class DatapathControllers(JsonObject):
""" A controller, from a datapath point of view """
def __init__(self, **kwargs):
self.master = kwargs.get('master', None)
self.slaves = kwargs.get('slaves', [])
class MeterFeatures(JsonObject):
def __init__(self, **kwargs):
self.flags = kwargs.get("flags", None)
self.max_bands_per_meter = kwargs.get("max_bands_per_meter", None)
self.max_color_value = kwargs.get("max_color_value", None)
self.max_meters = kwargs.get("max_meters", None)
self.types = kwargs.get("types", None)
class GroupFeatures(JsonObject):
""" Docstirg here"""
def __init__(self, **kwargs):
self.actions = kwargs.get("actions", None)
self.capabilities = kwargs.get("capabilities", None)
self.max_groups = kwargs.get("max_groups", None)
self.types = kwargs.get("types", None)
class Port(JsonObject):
""" Port (JsonObject)
A python representation of the Port object
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.mac = kwargs.get('mac', None)
self.current_speed = kwargs.get('current_speed', None)
self.max_speed = kwargs.get('max_speed', None)
self.config = kwargs.get('config', [])
self.state = kwargs.get('state', [])
self.current_features = kwargs.get('current_features', [])
self.advertised_features = kwargs.get('advertised_features', [])
self.supported_features = kwargs.get('supported_features', [])
self.peer_features = kwargs.get('peer_features', [])
class Flow(JsonObject):
""" Flow (JsonObject)
A python representation of the Flow object
"""
def __init__(self, **kwargs):
self.table_id = kwargs.get('table_id', None)
self.priority = kwargs.get('priority', None)
self.match = kwargs.get('match', None)
self.duration_sec = kwargs.get('duration_sec', None)
self.duration_nsec = kwargs.get('duration_nsec', None)
self.idle_timeout = kwargs.get('idle_timeout', None)
self.hard_timeout = kwargs.get('hard_timeout', None)
self.packet_count = kwargs.get('packet_count', None)
self.byte_count = kwargs.get('byte_count', None)
self.cookie = kwargs.get('cookie', None)
self.cookie_mask = kwargs.get('cookie_mask', None)
self.buffer_id = kwargs.get('buffer_id', None)
self.out_port = kwargs.get('out_port', None)
self.flow_mod_cmd = kwargs.get('flow_mod_cmd', None)
self.flow_mod_flags = kwargs.get('flow_mod_flags', [])
self.instructions = kwargs.get('instructions', [])
self.actions = kwargs.get('actions', [])
@classmethod
def factory(cls, data):
""" Override factory in the base class to create a single instance of
the Match class for the 'match' key. We do this as each match field
may only exist once. Actions are trickier as keys here are not unique.
When multiple values are present, """
try:
cm = CLASS_MAP[cls.__name__]
for key in data:
if key == 'match':
new_match = {}
for d in data[key]:
for k in d:
new_match[k] = d[k]
data[key] = JsonObjectFactory.create('Match', new_match)
elif key == 'actions':
new_action = {}
keys = []
for d in data[key]:
keys.extend([(k, v) for k, v in d.items()])
num_keys = range(len(keys))
duplicates = {}
for i in num_keys:
key_name = keys[i][0]
if key_name in duplicates:
duplicates[key_name].append(i)
else:
duplicates[key_name] = [i]
                    # Collapse duplicate action keys into a list; a single
                    # occurrence keeps its scalar value.
                    for k, v in duplicates.items():
                        if len(v) > 1:
                            new_action[k] = [keys[i][1] for i in v]
                        else:
                            new_action[k] = keys[v[0]][1]
data[key] = JsonObjectFactory.create('Action', new_action)
elif key in cm and isinstance(data[key], list):
l = []
for d in data[key]:
l.append(JsonObjectFactory.create(cm[key], d))
data[key] = l
elif key in cm:
data[key] = JsonObjectFactory.create(cm[key], data[key])
except KeyError:
pass
return cls(**data)
class Match(JsonObject):
""" Match (JsonObject)
A python representation of the Match object
"""
def __init__(self, **kwargs):
self.in_port = kwargs.get('in_port', None)
self.in_phy_port = kwargs.get('in_phy_port', None)
self.metadata = kwargs.get('metadata', None)
self.tunnel_id = kwargs.get('tunnel_id', None)
self.eth_dst = kwargs.get('eth_dst', None)
self.eth_src = kwargs.get('eth_src', None)
self.eth_type = kwargs.get('eth_type', None)
self.ip_proto = kwargs.get('ip_proto', None)
self.icmpv6_type = kwargs.get('icmpv6_type', None)
self.ipv6_nd_sll = kwargs.get('ipv6_nd_sll', None)
self.ipv6_nd_tll = kwargs.get('ipv6_nd_tll', None)
self.vlan_vid = kwargs.get('vlan_vid', None)
self.mode = kwargs.get('mode', None)
self.vlan_pcp = kwargs.get('vlan_pcp', None)
self.ip_dscp = kwargs.get('ip_dscp', None)
self.ip_ecn = kwargs.get('ip_ecn', None)
self.icmpv4_code = kwargs.get('icmpv4_code', None)
        self.icmpv6_code = kwargs.get('icmpv6_code', None)
self.mpls_tc = kwargs.get('mpls_tc', None)
self.mpls_bos = kwargs.get('mpls_bos', None)
self.arp_op = kwargs.get('arp_op', None)
self.ipv6_flabel = kwargs.get('ipv6_flabel', None)
self.mpls_label = kwargs.get('mpls_label', None)
self.pbb_isisd = kwargs.get('pbb_isisd', None)
self.ipv4_src = kwargs.get('ipv4_src', None)
self.ipv4_dst = kwargs.get('ipv4_dst', None)
self.arp_spa = kwargs.get('arp_spa', None)
self.arp_tpa = kwargs.get('arp_tpa', None)
self.ipv6_src = kwargs.get('ipv6_src', None)
self.ipv6_dst = kwargs.get('ipv6_dst', None)
self.ipv6_nd_target = kwargs.get('ipv6_nd_target', None)
self.tcp_src = kwargs.get('tcp_src', None)
self.tcp_dst = kwargs.get('tcp_dst', None)
self.udp_src = kwargs.get('udp_src', None)
self.udp_dst = kwargs.get('udp_dst', None)
self.sctp_src = kwargs.get('sctp_src', None)
self.sctp_dst = kwargs.get('sctp_dst', None)
self.icmpv4_type = kwargs.get('icmpv4_type', None)
self.ipv6_exthdr = kwargs.get('ipv6_exthdr', None)
def to_dict(self):
""" to_dict (self)
Creates a representation of the class as a dictionary
Overrides the parent method as all members variables of
this class are strings
"""
data = []
attributes = [attr for attr in dir(self)
if not callable(getattr(self, attr))
and not attr.startswith("__")]
for attr in attributes:
if getattr(self, attr):
tmp = {}
tmp[attr.__str__()] = getattr(self, attr)
data.append(tmp)
return data
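# Example (illustrative): Match serialises to a list of single-field dicts,
# mirroring the wire format expected by the controller, e.g.
#
#     Match(eth_type='ipv4', ipv4_src='10.0.0.1').to_dict()
#     # -> [{'eth_type': 'ipv4'}, {'ipv4_src': '10.0.0.1'}]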
class Action(JsonObject):
""" Action (JsonObject)
A python representation of the Action object
"""
def __init__(self, **kwargs):
self.output = kwargs.get('output', None)
self.copy_ttl_out = kwargs.get('copy_ttl_out', None)
self.copy_ttl_in = kwargs.get('copy_ttl_in', None)
self.set_mpls_ttl = kwargs.get('set_mpls_ttl', None)
self.dec_mpls_ttls = kwargs.get('dec_mpls_ttls', None)
self.push_vlan = kwargs.get('push_vlan', None)
self.pop_vlan = kwargs.get('pop_vlan', None)
self.push_mpls = kwargs.get('push_mpls', None)
self.pop_mpls = kwargs.get('pop_mpls', None)
self.set_queue = kwargs.get('set_queue', None)
self.group = kwargs.get('group', None)
self.set_nw_ttl = kwargs.get('set_nw_ttl', None)
self.dec_nw_ttl = kwargs.get('dec_nw_ttl', None)
self.set_field = kwargs.get('set_field', None)
self.push_pbb = kwargs.get('push_pbb', None)
self.pop_pbb = kwargs.get('pop_pbb', None)
self.experimenter = kwargs.get('experimenter', None)
self.data = kwargs.get('data', None)
def to_dict(self):
""" to_dict (self)
Creates a representation of the class as a dictionary
Overrides the parent method as all members variables of
this class are strings
"""
data = []
attributes = [attr for attr in dir(self)
if not callable(getattr(self, attr))
and not attr.startswith("__")]
for attr in attributes:
if attr == "output":
output = getattr(self, attr)
if type(output) == list:
for port in output:
tmp = {}
tmp[attr.__str__()] = port
data.append(tmp)
elif output:
tmp = {}
tmp[attr.__str__()] = getattr(self, attr)
data.append(tmp)
else:
if getattr(self, attr):
tmp = {}
tmp[attr.__str__()] = getattr(self, attr)
data.append(tmp)
return data
class Instruction(JsonObject):
""" Instruction (JsonObject)
A python representation of the Instruction object
"""
def __init__(self, **kwargs):
self.clear_actions = kwargs.get('clear_actions', None)
self.write_actions = kwargs.get('write_actions', [])
self.apply_actions = kwargs.get('apply_actions', [])
self.write_metadata = kwargs.get('write_metadata', None)
self.mask = kwargs.get('mask', None)
self.meter = kwargs.get('meter', None)
self.experimenter = kwargs.get('experimenter', None)
class MeterStats(JsonObject):
""" MeterStats (JsonObject)
A python representation of the MeterStats object
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.flow_count = kwargs.get('flow_count', None)
self.packet_count = kwargs.get('packet_count', None)
self.byte_count = kwargs.get('byte_count', None)
self.duration_sec = kwargs.get('duration_sec', None)
self.duration_nsec = kwargs.get('duration_nsec', None)
self.band_stats = kwargs.get('band_stats', [])
class BandStats(JsonObject):
""" BandStats (JsonObject)
A python representation of the BandStats object
"""
def __init__(self, **kwargs):
self.packet_count = kwargs.get('packet_count', None)
self.byte_count = kwargs.get('byte_count', None)
class Meter(JsonObject):
""" Meter (JsonObject)
A python representation of the Meter object
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.command = kwargs.get('command', None)
self.flags = kwargs.get('flags', [])
self.bands = kwargs.get('bands', [])
class MeterBand(JsonObject):
""" MeterBand (JsonObject)
A python representation of the MeterBand object
"""
def __init__(self, **kwargs):
self.burst_size = kwargs.get('burst_size', None)
self.rate = kwargs.get('rate', None)
self.mtype = kwargs.get('mtype', None)
self.prec_level = kwargs.get('prec_level', None)
self.experimenter = kwargs.get('experimenter', None)
class Group(JsonObject):
""" Group (JsonObject)
A python representation of the Group object
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.properties = kwargs.get('properties', None)
self.ref_count = kwargs.get('ref_count', None)
self.packet_count = kwargs.get('packet_count', None)
self.byte_count = kwargs.get('byte_count', None)
self.duration_sec = kwargs.get('duration_sec', None)
self.duration_nsec = kwargs.get('duration_nsec', None)
self.bucket_stats = kwargs.get('bucket_stats', [])
self.type = kwargs.get('type', None)
self.buckets = kwargs.get('buckets', [])
class Bucket(JsonObject):
""" Bucket (JsonObject)
A python representation of the Bucket object
"""
def __init__(self, **kwargs):
self.weight = kwargs.get('weight', None)
self.watch_group = kwargs.get('watch_group', None)
self.watch_port = kwargs.get('watch_port', None)
self.actions = kwargs.get('actions', [])
class Stats(JsonObject):
""" Stats (JsonObject)
A python representation of the Stats object
"""
def __init__(self, **kwargs):
self.dpid = kwargs.get('dpid', None)
self.version = kwargs.get('version', None)
self.port_stats = kwargs.get('port_stats', [])
self.group_stats = kwargs.get('group_stats', [])
self.meter_stats = kwargs.get('meter_stats', [])
class PortStats(JsonObject):
""" PortStats (JsonObject)
A python representation of the PortStats object
"""
def __init__(self, **kwargs):
self.port_id = kwargs.get('id', None)
self.rx_packets = kwargs.get('rx_packets', None)
self.tx_packets = kwargs.get('tx_packets', None)
self.rx_bytes = kwargs.get('rx_bytes', None)
self.tx_bytes = kwargs.get('tx_bytes', None)
self.rx_dropped = kwargs.get('rx_dropped', None)
self.tx_dropped = kwargs.get('tx_dropped', None)
self.rx_errors = kwargs.get('rx_errors', None)
self.tx_errors = kwargs.get('tx_errors', None)
self.collisions = kwargs.get('collisions', None)
self.duration_sec = kwargs.get('duration_sec', None)
self.duration_nsec = kwargs.get('duration_nsec', None)
self.rx_crc_err = kwargs.get('rx_crc_err', None)
self.rx_frame_err = kwargs.get('rx_frame_err', None)
self.rx_over_err = kwargs.get('rx_over_err', None)
class GroupStats(JsonObject):
""" GroupStats (JsonObject)
A python representation of the GroupStats object
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.ref_count = kwargs.get('ref_count', None)
self.packet_count = kwargs.get('packet_count', None)
self.byte_count = kwargs.get('byte_count', None)
self.duration_sec = kwargs.get('duration_sec', None)
self.duration_nsec = kwargs.get('duration_nsec', None)
self.bucket_stats = kwargs.get('bucket_stats', [])
# Network Services #
class Cluster(JsonObject):
""" Cluster (JsonObject)
A python representation of the Cluster object
"""
def __init__(self, **kwargs):
self.uid = kwargs.get('uid', None)
self.links = kwargs.get('links', [])
class Link(JsonObject):
""" Link (JsonObject)
A python representation of the Link object
"""
def __init__(self, **kwargs):
self.src_dpid = kwargs.get('src_dpid', None)
self.src_port = kwargs.get('src_port', None)
self.dst_dpid = kwargs.get('dst_dpid', None)
self.dst_port = kwargs.get('dst_port', None)
self.info = kwargs.get('info', [])
class LinkInfo(JsonObject):
""" LinkInfo (JsonObject)
A python representation of the LinkInfo object
"""
def __init__(self, **kwargs):
self.m_time = kwargs.get('m_time', None)
self.u_time = kwargs.get('s_time', None)
self.src_port_state = kwargs.get('s_pt_state', [])
self.dst_port_state = kwargs.get('d_pt_state', [])
self.link_type = kwargs.get('link_type', None)
# lldp_suppressed == list of LldpProperties
class LldpProperties(JsonObject):
""" LldpProperties (JsonObject)
A python representation of the LldpProperties object
"""
def __init__(self, **kwargs):
self.dpid = kwargs.get('dpid', None)
self.ports = kwargs.get('ports', [])
class Arp(JsonObject):
""" Arp (JsonObject)
A python representation of the Arp object
"""
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.mac = kwargs.get('mac', None)
self.vid = kwargs.get('vid', None)
class Node(JsonObject):
""" Node (JsonObject)
A python representation of the Node object
"""
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.mac = kwargs.get('mac', None)
self.vid = kwargs.get('vid', None)
self.dpid = kwargs.get('dpid', None)
self.port = kwargs.get('port', None)
class Path(JsonObject):
""" Path (JsonObject)
A python representation of the Path object
"""
def __init__(self, **kwargs):
self.cost = kwargs.get('cost', None)
self.links = kwargs.get('links', [])
class LinkSync(JsonObject):
""" LinkSync ()
A python representation of the LinkSync object
"""
def __init__(self, **kwargs):
self.s_dpid = kwargs.get('s_dpid', None)
self.s_port = kwargs.get('s_port', None)
self.d_dpid = kwargs.get('d_dpid', None)
self.d_port = kwargs.get('d_port', None)
self.info = kwargs.get('info', None)
class ClusterSync(JsonObject):
""" ClusterSync()
A python representation of the ClusterSync object
"""
def __init__(self, **kwargs):
self.id = kwargs.get("id", None)
self.root = kwargs.get("root", None)
self.nodes = kwargs.get("nodes", None)
class NodeSync(JsonObject):
""" NodeSync()
A python representation of the NodeSync object
"""
def __init__(self, **kwargs):
self.dpid = kwargs.get('dpid', None)
self.links = kwargs.get('links', None)
class NodeLink(JsonObject):
""" NodeLink()
    A Python representation of the NodeLink object
"""
def __init__(self, **kwargs):
self.s_dpid = kwargs.get('s_dpid', None)
self.s_port = kwargs.get('s_port', None)
self.d_dpid = kwargs.get('d_dpid', None)
self.d_port = kwargs.get('d_port', None)
self.s_pt_state = kwargs.get('s_pt_state', None)
self.d_pt_state = kwargs.get('d_pt_state', None)
class NodeMessage(JsonObject):
""" NodeMessage()
A Python representation of the NodeMessage object
"""
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.mac = kwargs.get('mac', None)
self.vid = kwargs.get('vid', None)
self.dpid = kwargs.get('dpid', None)
self.port = kwargs.get('operation', None)
# Lldp_sync == a list of LldpProperties
class Btree(JsonObject):
""" Btree()
A Python representation of the Btree object
"""
def __init__(self, **kwargs):
self.links = kwargs.get('links', [])
self.costs = kwargs.get('costs', [])
class BtreeLink(JsonObject):
""" BtreeLink()
A Python representation of the BtreeLink object
"""
def __init__(self, **kwargs):
self.dpid = kwargs.get('dpid', None)
self.link = kwargs.get('link', [])
class TreeLink(JsonObject):
""" TreeLink()
A Python representation of the TreeLink object
"""
def __init__(self, **kwargs):
self.s_dpid = kwargs.get('s_dpid', None)
self.s_port = kwargs.get('s_port', None)
self.d_dpid = kwargs.get('d_dpid', None)
self.d_port = kwargs.get('d_port', None)
class Cost(JsonObject):
""" Cost()
A Python representation of the Cost object
"""
def __init__(self, **kwargs):
self.dpid = kwargs.get("dpid", None)
self.cost = kwargs.get("cost", None)
# Core #
class AuditLogEntry(JsonObject):
""" AuditLogEntry()
    A Python representation of the AuditLogEntry object
"""
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", [])
self.system_uid = kwargs.get("system_uid", None)
self.user = kwargs.get("user", None)
self.ts = kwargs.get("ts", None)
self.activity = kwargs.get("activity", None)
self.description = kwargs.get("description", None)
class Alert(JsonObject):
""" Alert()
A Python representation of the Alert object
"""
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.org = kwargs.get("org", None)
self.ts = kwargs.get("ts", None)
self.sev = kwargs.get("sev", None)
self.state = kwargs.get("state", None)
self.topic = kwargs.get("topic", None)
self.desc = kwargs.get("desc", None)
self.system_uid = kwargs.get("system_uid", None)
class AlertTopic(JsonObject):
""" AlertTopic()
A Python representation of the AlertTopic object
"""
def __init__(self, **kwargs):
self.topic = kwargs.get("topic", None)
self.org = kwargs.get("org", None)
self.desc = kwargs.get("desc", None)
class AlertTopicListener(JsonObject):
""" AlertTopicListener()
A Python representation of the AlertTopicListener object
"""
def __init__(self, **kwargs):
self.uid = kwargs.get("topic", None)
self.app_id = kwargs.get("org", None)
self.name = kwargs.get("desc", None)
self.callbacks = kwargs.get("desc", [])
class Callback(JsonObject):
""" Callback()
A Python representation of the Callback object
"""
def __init__(self, **kwargs):
self.topics = kwargs.get("topics", [])
self.uri = kwargs.get("uri", None)
class Config(JsonObject):
""" Config()
A Python representation of the Config class
"""
def __init__(self, **kwargs):
self.age_out_days = kwargs.get("age_out_days", [])
self.trim_enabled = kwargs.get("trim_enabled", [])
self.trim_interval_hours = kwargs.get("trim_interval_hours", [])
class ConfigItem(JsonObject):
""" ConfigItem()
A Python representation of the ConfigItem class
"""
def __init__(self, **kwargs):
self.val = kwargs.get("val", None)
self.def_val = kwargs.get("def_val", None)
self.desc = kwargs.get("desc", None)
class SupportEntry(JsonObject):
""" SupportEntry()
A Python representation of the SupportEntry class
"""
def __init__(self, **kwargs):
self.title = kwargs.get("title", None)
self.id = kwargs.get("id", None)
self.content = kwargs.get("content", [])
class System(JsonObject):
""" A system """
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.version = kwargs.get("version", None)
self.role = kwargs.get("role", None)
self.core_data_version = kwargs.get("core_data_version", None)
self.core_data_version_timestamp = kwargs.get(
"core_data_version_timestamp", None
)
self.time = kwargs.get("time", None)
self.self_ = kwargs.get("self_", None)
self.status = kwargs.get("status", None)
class ControllerNode(JsonObject):
""" A Controller Node """
def __init__(self, **kwargs):
self.ip = kwargs.get("ip", None)
self.name = kwargs.get("name", None)
class Region(JsonObject):
""" A Region """
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.master = kwargs.get("master", None)
self.slaves = kwargs.get("slaves", [])
self.devices = kwargs.get("devices", [])
class Team(JsonObject):
""" Team()
A Python representation of the Team object
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.ip = kwargs.get("ip", None)
self.version = kwargs.get("version")
self.systems = kwargs.get("systems")
class TeamSystem(JsonObject):
""" TeamSystems()
A Python object to represent the systems that belong to a team.
"""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
        self.ip = kwargs.get("ip", None)
        self.priority = kwargs.get("priority", None)
class Metric(JsonObject):
""" Metric()
A Python object to represent the Metric object.
"""
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.app_id = kwargs.get("app_id", None)
self.name = kwargs.get("name", None)
self.type = kwargs.get("type", None)
self.description = kwargs.get("description", None)
self.primary_tag = kwargs.get("primary_tag", None)
self.secondary_tag = kwargs.get("secondary_tag", None)
self.jmx = kwargs.get("jmx", None)
self.persistence = kwargs.get("persistence", None)
self.summary_interval = kwargs.get("summary_interval", None)
self.priming_value = kwargs.get("priming_value", None)
class MetricUpdate(JsonObject):
""" Metric()
A Python object to represent the Metric object.
"""
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.value = kwargs.get("value", None)
self.int_value = kwargs.get("int_value", None)
self.numerator = kwargs.get("numerator", None)
self.denominator = kwargs.get("denominator", None)
self.decrement = kwargs.get("decrement", None)
self.increment = kwargs.get("increment", None)
self.mark = kwargs.get("mark", None)
self.type = kwargs.get("type", None)
class License(JsonObject):
""" A License """
def __init__(self, **kwargs):
self.install_id = kwargs.get("install_id", None)
self.serial_no = kwargs.get("serial_no", None)
self.product = kwargs.get("product", None)
self.license_metric = kwargs.get("license_metric", None)
self.metric_qty = kwargs.get("metric_qty", None)
self.license_type = kwargs.get("license_type", None)
self.base_license = kwargs.get("base_license", None)
self.creation_date = kwargs.get("creation_date", None)
self.activated_date = kwargs.get("activated_date", None)
self.expiry_date = kwargs.get("expiry_date", None)
self.license_status = kwargs.get("license_status", None)
self.deactivated_key = kwargs.get("deactivated_key", None)
class Packet(JsonObject):
""" Packet()
A Python object to represent the Packet object
"""
def __init__(self, **kwargs):
self.type = kwargs.get("type", None)
self.uid = kwargs.get("uid", None)
self.eth = kwargs.get("eth", None)
self.ip = kwargs.get("ip", None)
self.icmp = kwargs.get("icmp", None)
self.ipv6 = kwargs.get("ipv6", None)
self.icmpv6 = kwargs.get("icmpv6", None)
self.tcp = kwargs.get("tcp", None)
self.udp = kwargs.get("udp", None)
self.dhcp = kwargs.get("dhcp", None)
class Ethernet(JsonObject):
""" Ethernet()
A python object to represent the Ethernet header
"""
def __init__(self, **kwargs):
self.eth_src = kwargs.get("eth_src", None)
self.eth_dst = kwargs.get("eth_dst", None)
self.eth_type = kwargs.get("eth_type", None)
self.vlan_vid = kwargs.get("vlan_vid", None)
self.vlan_pcp = kwargs.get("vlan_pcp", None)
class Ip(JsonObject):
""" Ip()
A python object to represent the Ip header
"""
def __init__(self, **kwargs):
self.ipv4_src = kwargs.get("ipv4_src", None)
self.ipv4_dst = kwargs.get("ipv4_dst", None)
self.ip_proto = kwargs.get("ip_proto", None)
self.ip_dscp = kwargs.get("ip_dscp", None)
self.ip_ecn = kwargs.get("ip_ecn", None)
self.ip_ident = kwargs.get("ip_ident", 0)
class Icmp(JsonObject):
""" Icmp()
A python object to represent the Icmp header
"""
def __init__(self, **kwargs):
self.icmp_code = kwargs.get("icmp_code", None)
class Ipv6(JsonObject):
""" Ipv6()
A python object to represent the Ipv6 header
"""
def __init__(self, **kwargs):
self.ipv6_src = kwargs.get("ipv4_src", None)
self.ipv6_dst = kwargs.get("ipv4_dst", None)
self.ip_proto = kwargs.get("ip_proto", None)
self.ip_dscn = kwargs.get("ip_dscn", None)
self.ip_hop_limit = kwargs.get("ip_hop_limit", None)
class Icmpv6(JsonObject):
""" Icmp()
A python object to represent the Icmp header
"""
def __init__(self, **kwargs):
self.icmp_type_code = kwargs.get("icmp_code", None)
self.is_sender_router = kwargs.get("is_sender_router", None)
self.is_solicit_response = kwargs.get("is_solicit_response", None)
self.override = kwargs.get("override", None)
        self.target_address = kwargs.get('target_address', None)
class Tcp(JsonObject):
""" Tcp()
    A Python representation of the TCP header
"""
def __init__(self, **kwargs):
self.tcp_dst = kwargs.get("tcp_dst", None)
self.tcp_src = kwargs.get("tcp_src", None)
class Udp(JsonObject):
""" Udp()
    A Python representation of the UDP header
"""
def __init__(self, **kwargs):
self.udp_dst = kwargs.get("udp_dst", None)
self.udp_src = kwargs.get("udp_src", None)
class Dhcp(JsonObject):
""" Dhcp()
A Python representation of the Dhcp message
"""
def __init__(self, **kwargs):
self.opcode = kwargs.get("opcode", None)
self.boot_flags = kwargs.get("boot_flags", None)
self.client_ip = kwargs.get("client_ip", None)
self.your_client_ip = kwargs.get("your_client_ip", None)
self.next_server_ip = kwargs.get("next_server_ip", None)
self.relay_agent_ip = kwargs.get("relay_agent_ip", None)
self.client_mac = kwargs.get("client_mac", None)
self.options = kwargs.get("options", None)
class DhcpOptions(JsonObject):
""" DhcpOptions()
A Python representation of DHCP Options
"""
def __init__(self, **kwargs):
self.type = kwargs.get("type", None)
self.parameter_request_list = kwargs.get("parameter_request_list",
None)
class App(JsonObject):
""" An app """
def __init__(self, **kwargs):
self.deployed = kwargs.get("deployed", None)
self.desc = kwargs.get("desc", None)
self.name = kwargs.get("name", None)
self.state = kwargs.get("state", None)
self.uid = kwargs.get("uid", None)
self.vendor = kwargs.get("vendor", None)
self.version = kwargs.get("version", None)
class AppHealth(JsonObject):
""" An app health object """
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.deployed = kwargs.get("deployed", None)
self.name = kwargs.get("name", None)
self.state = kwargs.get("state", None)
self.status = kwargs.get("status", None)
class MetricApp(JsonObject):
""" An application with metering data on disk """
def __init__(self, **kwargs):
self.app_id = kwargs.get("app_id", None)
self.app_name = kwargs.get("app_name", None)
class MetricValues(JsonObject):
""" The metric values """
def __init__(self, **kwargs):
self.type = kwargs.get("type", None)
self.uid = kwargs.get("uid", None)
self.datapoint_count = kwargs.get("datapoint_count", None)
self.datapoints = kwargs.get("datapoints", [])
class DataPoint(JsonObject):
""" A datapoint """
def __init__(self, **kwargs):
self.count = kwargs.get("count", None)
self.milliseconds_span = kwargs.get("milliseconds_span", None)
self.update_time = kwargs.get("upate_time", None)
class NextHop(JsonObject):
def __init__(self, **kwargs):
self.dpid = kwargs.get("dpid", None)
self.out_port = kwargs.get("out_port", None)
class ControllerStats(JsonObject):
def __init__(self, **kwargs):
self.uid = kwargs.get("uid", None)
self.duration_ms = kwargs.get("duration_ms", None)
self.lost = kwargs.get("lost", None)
self.msg_in = kwargs.get("msg_in", None)
self.msg_out = kwargs.get("msg_out", None)
self.packet_in = kwargs.get("packet_in", None)
self.packet_out = kwargs.get("packet_out", None)
class Counter(JsonObject):
def __init__(self, **kwargs):
self.packets = kwargs.get("packets", None)
self.bytes = kwargs.get("bytes", None)
class Observation(JsonObject):
def __init__(self, **kwargs):
self.dpid = kwargs.get("dpid", None)
self.type = kwargs.get("type", None)
self.packet_uid = kwargs.get("packet_uid", None)
self.status = kwargs.get("status", None)
CLASS_LIST = [s() for s in JsonObject.__subclasses__()]
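# Example (illustrative sketch, not part of the upstream module): building a
# Flow from controller-style JSON and serialising it back:
#
#     raw = {'priority': 30000,
#            'match': [{'eth_type': 'ipv4'}, {'ipv4_src': '10.0.0.1'}],
#            'actions': [{'output': 1}]}
#     flow = JsonObjectFactory.create('Flow', raw)
#     print(flow.to_json_string())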
|
chrissmall22/odl-client
|
odlclient/datatypes.py
|
Python
|
apache-2.0
| 43,110
|
import json
from boxsdk.object.base_object import BaseObject
from ..util.api_call_decorator import api_call
class Comment(BaseObject):
"""An object that represents a comment on an item"""
_item_type = 'comment'
@staticmethod
def construct_params_from_message(message: str) -> dict:
message_type = 'tagged_message' if '@[' in message else 'message'
return {
message_type: message
}
@api_call
def reply(self, message: str) -> 'Comment':
"""
Add a reply to the comment.
:param message:
The content of the reply comment.
"""
url = self.get_type_url()
data = self.construct_params_from_message(message)
data['item'] = {
'type': 'comment',
'id': self.object_id
}
box_response = self._session.post(url, data=json.dumps(data))
response = box_response.json()
return self.translator.translate(
session=self._session,
response_object=response,
)
@api_call
def edit(self, message: str) -> 'Comment':
"""
Edit the message of the comment.
:param message:
The content of the reply comment.
"""
data = self.construct_params_from_message(message)
return self.update_info(data=data)
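# Example (illustrative sketch; the client and comment id are hypothetical):
#
#     comment = client.comment(comment_id='12345')
#     reply = comment.reply('Thanks for the review!')
#     edited = comment.edit('Updated @[123:someone] message')  # tagged_message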
|
box/box-python-sdk
|
boxsdk/object/comment.py
|
Python
|
apache-2.0
| 1,357
|
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
import six
LOG = logging.getLogger(__name__)
class APIResponse(object):
"""Decoded API Response
This provides a decoded version of the Requests response which
    includes a json decoded body, far more convenient for testing that
returned structures are correct, or using parts of returned
structures in tests.
    This class is a simple wrapper around dictionaries for API
    responses in tests. It exposes extra attributes (status code, raw
    content and headers) so that they can be inspected in addition to
    the decoded body.
All json responses from Nova APIs are dictionary compatible, or
blank, so other possible base classes are not needed.
"""
status = 200
"""The HTTP status code as an int"""
content = ""
"""The Raw HTTP response body as a string"""
body = {}
"""The decoded json body as a dictionary"""
headers = {}
"""Response headers as a dictionary"""
def __init__(self, response):
"""Construct an API response from a Requests response
:param response: a ``requests`` library response
"""
super(APIResponse, self).__init__()
self.status = response.status_code
self.content = response.content
if self.content:
self.body = jsonutils.loads(self.content)
self.headers = response.headers
def __str__(self):
# because __str__ falls back to __repr__ we can still use repr
# on self but add in the other attributes.
return "<Response body:%r, status_code:%s>" % (self.body, self.status)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
_status = response.status_code
_body = response.content
message = ('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s' %
{'message': message, '_status': _status,
'_body': _body})
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Authentication error"
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Authorization error"
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Item not found"
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, auth_key, auth_uri,
project_id=None):
super(TestOpenStackClient, self).__init__()
self.auth_result = None
self.auth_user = auth_user
self.auth_key = auth_key
self.auth_uri = auth_uri
if project_id is None:
self.project_id = "6f70656e737461636b20342065766572"
else:
self.project_id = project_id
self.microversion = None
def request(self, url, method='GET', body=None, headers=None):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
response = requests.request(method, url, data=body, headers=_headers)
return response
def _authenticate(self):
if self.auth_result:
return self.auth_result
auth_uri = self.auth_uri
headers = {'X-Auth-User': self.auth_user,
'X-Auth-Key': self.auth_key,
'X-Auth-Project-Id': self.project_id}
response = self.request(auth_uri,
headers=headers)
http_status = response.status_code
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
raise OpenStackApiAuthenticationException(response=response)
self.auth_result = response.headers
return self.auth_result
def api_request(self, relative_uri, check_response_status=None,
strip_version=False, **kwargs):
auth_result = self._authenticate()
# NOTE(justinsb): httplib 'helpfully' converts headers to lower case
base_uri = auth_result['x-server-management-url']
if strip_version:
# NOTE(vish): cut out version number and tenant_id
base_uri = '/'.join(base_uri.split('/', 3)[:-1])
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
if self.microversion:
headers['X-OpenStack-Nova-API-Version'] = self.microversion
response = self.request(full_uri, **kwargs)
http_status = response.status_code
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message="Unexpected status code",
response=response)
return response
    def _decode_json(self, response):
        # APIResponse expects the raw response and decodes the body itself.
        return APIResponse(response)
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202, 204])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_delete(self, relative_uri, **kwargs):
kwargs['method'] = 'DELETE'
kwargs.setdefault('check_response_status', [200, 202, 204])
return APIResponse(self.api_request(relative_uri, **kwargs))
#####################################
#
# Convenience methods
#
# The following are a set of convenience methods to get well known
# resources, they can be helpful in setting up resources in
# tests. All of these convenience methods throw exceptions if they
# get a non 20x status code, so will appropriately abort tests if
# they fail.
#
# They all return the most relevant part of their response body as
# decoded data structure.
#
#####################################
def get_server(self, server_id):
return self.api_get('/servers/%s' % server_id).body['server']
def get_servers(self, detail=True, search_opts=None):
rel_url = '/servers/detail' if detail else '/servers'
if search_opts is not None:
qparams = {}
for opt, val in six.iteritems(search_opts):
qparams[opt] = val
if qparams:
query_string = "?%s" % urllib.urlencode(qparams)
rel_url += query_string
return self.api_get(rel_url).body['servers']
def post_server(self, server):
response = self.api_post('/servers', server).body
if 'reservation_id' in response:
return response
else:
return response['server']
def put_server(self, server_id, server):
return self.api_put('/servers/%s' % server_id, server).body
def post_server_action(self, server_id, data):
return self.api_post('/servers/%s/action' % server_id, data).body
def delete_server(self, server_id):
return self.api_delete('/servers/%s' % server_id)
def get_image(self, image_id):
return self.api_get('/images/%s' % image_id).body['image']
def get_images(self, detail=True):
rel_url = '/images/detail' if detail else '/images'
return self.api_get(rel_url).body['images']
def post_image(self, image):
return self.api_post('/images', image).body['image']
def delete_image(self, image_id):
return self.api_delete('/images/%s' % image_id)
def get_flavor(self, flavor_id):
return self.api_get('/flavors/%s' % flavor_id).body['flavor']
def get_flavors(self, detail=True):
rel_url = '/flavors/detail' if detail else '/flavors'
return self.api_get(rel_url).body['flavors']
def post_flavor(self, flavor):
return self.api_post('/flavors', flavor).body['flavor']
def delete_flavor(self, flavor_id):
return self.api_delete('/flavors/%s' % flavor_id)
def post_extra_spec(self, flavor_id, spec):
return self.api_post('/flavors/%s/os-extra_specs' %
flavor_id, spec)
def get_volume(self, volume_id):
return self.api_get('/os-volumes/%s' % volume_id).body['volume']
def get_volumes(self, detail=True):
rel_url = '/os-volumes/detail' if detail else '/os-volumes'
return self.api_get(rel_url).body['volumes']
def post_volume(self, volume):
return self.api_post('/os-volumes', volume).body['volume']
def delete_volume(self, volume_id):
return self.api_delete('/os-volumes/%s' % volume_id)
def get_snapshot(self, snap_id):
return self.api_get('/os-snapshots/%s' % snap_id).body['snapshot']
def get_snapshots(self, detail=True):
rel_url = '/os-snapshots/detail' if detail else '/os-snapshots'
return self.api_get(rel_url).body['snapshots']
def post_snapshot(self, snapshot):
return self.api_post('/os-snapshots', snapshot).body['snapshot']
def delete_snapshot(self, snap_id):
return self.api_delete('/os-snapshots/%s' % snap_id)
def get_server_volume(self, server_id, attachment_id):
return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id)
).body['volumeAttachment']
def get_server_volumes(self, server_id):
return self.api_get('/servers/%s/os-volume_attachments' %
(server_id)).body['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment
).body['volumeAttachment']
def delete_server_volume(self, server_id, attachment_id):
return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))
def post_server_metadata(self, server_id, metadata):
post_body = {'metadata': {}}
post_body['metadata'].update(metadata)
return self.api_post('/servers/%s/metadata' % server_id,
post_body).body['metadata']
def get_server_groups(self, all_projects=None):
if all_projects:
return self.api_get(
'/os-server-groups?all_projects').body['server_groups']
else:
return self.api_get('/os-server-groups').body['server_groups']
def get_server_group(self, group_id):
return self.api_get('/os-server-groups/%s' %
group_id).body['server_group']
def post_server_groups(self, group):
response = self.api_post('/os-server-groups', {"server_group": group})
return response.body['server_group']
def delete_server_group(self, group_id):
self.api_delete('/os-server-groups/%s' % group_id)
def get_instance_actions(self, server_id):
return self.api_get('/servers/%s/os-instance-actions' %
(server_id)).body['instanceActions']
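# A minimal, hedged usage sketch of the helper methods above (not from the
# original test suite): it assumes `api` is an already-constructed instance of
# the client class defined in this file, and the image/flavor ids below are
# purely illustrative placeholders.
def _example_server_lifecycle(api):
    # Boot a server, trigger an action on it, list servers, then clean up.
    server = api.post_server(
        {'server': {'name': 'example-server',
                    'imageRef': 'example-image-id',      # hypothetical id
                    'flavorRef': 'example-flavor-id'}})  # hypothetical id
    api.post_server_action(server['id'], {'reboot': {'type': 'SOFT'}})
    servers = api.get_servers(detail=False)
    assert any(s['id'] == server['id'] for s in servers)
    api.delete_server(server['id'])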
| cernops/nova | nova/tests/functional/api/client.py | Python | apache-2.0 | 14,003 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TRT INT8 conversion without calibration on Mnist model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.compiler.tf2tensorrt.python.ops import trt_ops
# pylint: enable=unused-import
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import saver
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.training_util import get_global_step
INPUT_NODE_NAME = 'input'
OUTPUT_NODE_NAME = 'output'
class QuantizationAwareTrainingMNISTTest(test_util.TensorFlowTestCase):
def _BuildGraph(self, x):
def _Quantize(x, r):
x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
return x
def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
"""Dense layer with quantized outputs.
Args:
x: input to the dense layer
num_inputs: number of input columns of x
num_outputs: number of output columns
quantization_range: the min/max range for quantization
name: name of the variable scope
Returns:
The output of the layer.
"""
with variable_scope.variable_scope(name):
kernel = variable_scope.get_variable(
'kernel',
shape=[num_inputs, num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.glorot_uniform())
bias = variable_scope.get_variable(
'bias',
shape=[num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.zeros())
x = math_ops.matmul(x, kernel)
x = _Quantize(x, quantization_range)
x = nn.bias_add(x, bias)
x = _Quantize(x, quantization_range)
return x
x = _Quantize(x, 1)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Reduce
x = math_ops.reduce_mean(x, [1, 2])
x = _Quantize(x, 6)
# FC1
x = _DenseLayer(x, 64, 512, 6, name='dense')
x = nn.relu6(x)
# FC2
x = _DenseLayer(x, 512, 10, 25, name='dense_1')
x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
return x
def _GetGraphDef(self, use_trt, max_batch_size, model_dir):
"""Get the frozen mnist GraphDef.
Args:
      use_trt: whether to use TF-TRT to convert the graph.
max_batch_size: the max batch size to apply during TF-TRT conversion.
model_dir: the model directory to load the checkpoints.
Returns:
The frozen mnist GraphDef.
"""
graph = ops.Graph()
with self.session(graph=graph) as sess:
with graph.device('/GPU:0'):
x = array_ops.placeholder(
shape=(None, 28, 28, 1), dtype=dtypes.float32, name=INPUT_NODE_NAME)
self._BuildGraph(x)
# Load weights
mnist_saver = saver.Saver()
checkpoint_file = latest_checkpoint(model_dir)
mnist_saver.restore(sess, checkpoint_file)
# Freeze
graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names=[OUTPUT_NODE_NAME])
# Convert with TF-TRT
if use_trt:
logging.info('Number of nodes before TF-TRT conversion: %d',
len(graph_def.node))
graph_def = trt_convert.create_inference_graph(
graph_def,
outputs=[OUTPUT_NODE_NAME],
max_batch_size=max_batch_size,
precision_mode='INT8',
# There is a 2GB GPU memory limit for each test, so we set
# max_workspace_size_bytes to 256MB to leave enough room for TF
# runtime to allocate GPU memory.
max_workspace_size_bytes=1 << 28,
minimum_segment_size=2,
use_calibration=False,
)
logging.info('Number of nodes after TF-TRT conversion: %d',
len(graph_def.node))
num_engines = len(
[1 for n in graph_def.node if str(n.op) == 'TRTEngineOp'])
self.assertEqual(1, num_engines)
return graph_def
def _Run(self, is_training, use_trt, batch_size, num_epochs, model_dir):
"""Train or evaluate the model.
Args:
is_training: whether to train or evaluate the model. In training mode,
        quantization will be simulated where the quantize_and_dequantize_v2 ops
        are placed.
use_trt: if true, use TRT INT8 mode for evaluation, which will perform
real quantization. Otherwise use native TensorFlow which will perform
simulated quantization. Ignored if is_training is True.
batch_size: batch size.
num_epochs: how many epochs to train. Ignored if is_training is False.
model_dir: where to save or load checkpoint.
Returns:
The Estimator evaluation result.
"""
# Get dataset
train_data, test_data = mnist.load_data()
def _PreprocessFn(x, y):
x = math_ops.cast(x, dtypes.float32)
x = array_ops.expand_dims(x, axis=2)
x = 2.0 * (x / 255.0) - 1.0
y = math_ops.cast(y, dtypes.int32)
return x, y
def _EvalInputFn():
mnist_x, mnist_y = test_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=1)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _TrainInputFn():
mnist_x, mnist_y = train_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.shuffle(2 * len(mnist_x))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=num_epochs)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _ModelFn(features, labels, mode):
if is_training:
logits_out = self._BuildGraph(features)
else:
graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
logits_out = importer.import_graph_def(
graph_def,
input_map={INPUT_NODE_NAME: features},
return_elements=[OUTPUT_NODE_NAME + ':0'],
name='')[0]
loss = losses.sparse_softmax_cross_entropy(
labels=labels, logits=logits_out)
summary.scalar('loss', loss)
classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
accuracy = metrics.accuracy(
labels=labels, predictions=classes_out, name='acc_op')
summary.scalar('accuracy', accuracy[1])
if mode == ModeKeys.EVAL:
return EstimatorSpec(
mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
elif mode == ModeKeys.TRAIN:
optimizer = AdamOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss, global_step=get_global_step())
return EstimatorSpec(mode, loss=loss, train_op=train_op)
config_proto = config_pb2.ConfigProto()
config_proto.gpu_options.allow_growth = True
estimator = Estimator(
model_fn=_ModelFn,
model_dir=model_dir if is_training else None,
config=RunConfig(session_config=config_proto))
if is_training:
estimator.train(_TrainInputFn)
results = estimator.evaluate(_EvalInputFn)
logging.info('accuracy: %s', str(results['accuracy']))
return results
# To generate the checkpoint, set a different model_dir and call self._Run()
# by setting is_training=True and num_epochs=1000, e.g.:
# model_dir = '/tmp/quantization_mnist'
# self._Run(
# is_training=True,
# use_trt=False,
# batch_size=128,
# num_epochs=100,
# model_dir=model_dir)
def testEval(self):
if not trt_convert.is_tensorrt_enabled():
return
model_dir = test.test_src_dir_path('python/compiler/tensorrt/test/testdata')
accuracy_tf_native = self._Run(
is_training=False,
use_trt=False,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_native: %f', accuracy_tf_native)
self.assertAllClose(0.9662, accuracy_tf_native, rtol=1e-3, atol=1e-3)
if trt_convert.get_linked_tensorrt_version()[0] < 5:
return
accuracy_tf_trt = self._Run(
is_training=False,
use_trt=True,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
test.main()
| jendap/tensorflow | tensorflow/python/compiler/tensorrt/test/quantization_mnist_test.py | Python | apache-2.0 | 10,917 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironicclient import exceptions
from ironic_inspector import node_cache
from ironic_inspector import utils
def hook(introspection_data, **kwargs):
ironic = utils.get_client()
try:
node = ironic.node.create(**{'driver': 'fake'})
except exceptions.HttpError as exc:
        raise utils.Error("Cannot create node in ironic for unknown "
                          "node: %s" % exc)
return node_cache.add_node(node.uuid, ironic=ironic)
| Tehsmash/inspector-hooks | inspector_hooks/enroll_node_not_found.py | Python | apache-2.0 | 1,013 |
"""Support for Aurora ABB PowerOne Solar Photvoltaic (PV) inverter."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from aurorapy.client import AuroraError, AuroraSerialClient
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENERGY_KILO_WATT_HOUR, POWER_WATT, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .aurora_device import AuroraEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = [
SensorEntityDescription(
key="instantaneouspower",
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_WATT,
state_class=SensorStateClass.MEASUREMENT,
name="Power Output",
),
SensorEntityDescription(
key="temp",
device_class=SensorDeviceClass.TEMPERATURE,
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=TEMP_CELSIUS,
state_class=SensorStateClass.MEASUREMENT,
name="Temperature",
),
SensorEntityDescription(
key="totalenergy",
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
name="Total Energy",
),
]
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up aurora_abb_powerone sensor based on a config entry."""
entities = []
client = hass.data[DOMAIN][config_entry.entry_id]
data = config_entry.data
for sens in SENSOR_TYPES:
entities.append(AuroraSensor(client, data, sens))
_LOGGER.debug("async_setup_entry adding %d entities", len(entities))
async_add_entities(entities, True)
class AuroraSensor(AuroraEntity, SensorEntity):
"""Representation of a Sensor on a Aurora ABB PowerOne Solar inverter."""
def __init__(
self,
client: AuroraSerialClient,
data: Mapping[str, Any],
entity_description: SensorEntityDescription,
) -> None:
"""Initialize the sensor."""
super().__init__(client, data)
self.entity_description = entity_description
self.available_prev = True
def update(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
try:
self.available_prev = self._attr_available
self.client.connect()
if self.entity_description.key == "instantaneouspower":
# read ADC channel 3 (grid power output)
power_watts = self.client.measure(3, True)
self._attr_native_value = round(power_watts, 1)
elif self.entity_description.key == "temp":
temperature_c = self.client.measure(21)
self._attr_native_value = round(temperature_c, 1)
elif self.entity_description.key == "totalenergy":
energy_wh = self.client.cumulated_energy(5)
self._attr_native_value = round(energy_wh / 1000, 2)
self._attr_available = True
except AuroraError as error:
self._attr_state = None
self._attr_native_value = None
self._attr_available = False
# aurorapy does not have different exceptions (yet) for dealing
# with timeout vs other comms errors.
# This means the (normal) situation of no response during darkness
# raises an exception.
# aurorapy (gitlab) pull request merged 29/5/2019. When >0.2.6 is
# released, this could be modified to :
# except AuroraTimeoutError as e:
# Workaround: look at the text of the exception
if "No response after" in str(error):
_LOGGER.debug("No response from inverter (could be dark)")
else:
raise error
finally:
if self._attr_available != self.available_prev:
if self._attr_available:
_LOGGER.info("Communication with %s back online", self.name)
else:
_LOGGER.warning(
"Communication with %s lost",
self.name,
)
if self.client.serline.isOpen():
self.client.close()
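# A minimal sketch of the raw aurorapy calls used by AuroraSensor.update()
# above, for illustration only: it assumes `client` is an already-constructed
# AuroraSerialClient and reuses the same measurement channels as the sensor.
def _example_read_inverter(client: AuroraSerialClient) -> dict[str, float]:
    """Read power, temperature and total energy directly from the inverter."""
    client.connect()
    try:
        return {
            "power_w": round(client.measure(3, True), 1),  # grid power output
            "temperature_c": round(client.measure(21), 1),
            "total_energy_kwh": round(client.cumulated_energy(5) / 1000, 2),
        }
    finally:
        if client.serline.isOpen():
            client.close()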
| mezz64/home-assistant | homeassistant/components/aurora_abb_powerone/sensor.py | Python | apache-2.0 | 4,749 |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib import decorators
class CrossdomainTest(base.BaseObjectTest):
@classmethod
def resource_setup(cls):
super(CrossdomainTest, cls).resource_setup()
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
'"http://www.adobe.com/xml/dtds/cross-domain-policy.' \
'dtd" >\n<cross-domain-policy>\n'
cls.xml_end = "</cross-domain-policy>"
def setUp(self):
super(CrossdomainTest, self).setUp()
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
url = self.account_client._get_base_version_url() + "crossdomain.xml"
resp, body = self.account_client.raw_request(url, "GET")
self.account_client._error_checker(resp, body)
body = body.decode()
self.assertTrue(body.startswith(self.xml_start) and
body.endswith(self.xml_end))
# The target of the request is not any Swift resource. Therefore, the
# existence of response header is checked without a custom matcher.
self.assertIn('content-length', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
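        # For reference (not asserted above): with a typical Swift crossdomain
        # middleware configuration, the full response body looks something like
        #
        #   <?xml version="1.0"?>
        #   <!DOCTYPE cross-domain-policy SYSTEM
        #       "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd" >
        #   <cross-domain-policy>
        #       <allow-access-from domain="*" secure="false" />
        #   </cross-domain-policy>
        #
        # i.e. the xml_start/xml_end fragments checked in this test plus
        # whatever cross_domain_policy the deployment configures in between.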
| cisco-openstack/tempest | tempest/api/object_storage/test_crossdomain.py | Python | apache-2.0 | 2,264 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import json
import urllib
import urlparse
import uuid
from keystone import config
from keystone.common import dependency
from keystone.contrib.oauth2 import core
from keystone.tests import test_v3
CONF = config.CONF
class OAuth2BaseTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'oauth2'
EXTENSION_TO_ADD = 'oauth2_extension'
PATH_PREFIX = '/OS-OAUTH2'
CONSUMER_URL = PATH_PREFIX + '/consumers'
USERS_URL = '/users/{user_id}'
ACCESS_TOKENS_URL = PATH_PREFIX + '/access_tokens'
DEFAULT_REDIRECT_URIS = [
'https://%s.com' %uuid.uuid4().hex,
'https://%s.com' %uuid.uuid4().hex
]
DEFAULT_SCOPES = [
uuid.uuid4().hex,
uuid.uuid4().hex,
'all_info'
]
def setUp(self):
super(OAuth2BaseTests, self).setUp()
# Now that the app has been served, we can query CONF values
self.base_url = 'http://localhost/v3'
        # NOTE(garcianavalon) I've put this line for dependency injection to work,
        # but I don't know if it's the right way to do it...
self.manager = core.Manager()
def _create_consumer(self, name=None, description=None,
client_type='confidential',
redirect_uris=DEFAULT_REDIRECT_URIS,
grant_type='authorization_code',
scopes=DEFAULT_SCOPES,
**kwargs):
if not name:
name = uuid.uuid4().hex
data = {
'name': name,
'description': description,
'client_type': client_type,
'redirect_uris': redirect_uris,
'grant_type': grant_type,
'scopes': scopes
}
# extra
data.update(kwargs)
response = self.post(self.CONSUMER_URL, body={'consumer': data})
return response.result['consumer'], data
def _create_user_and_tenant(self):
pass
class ConsumerCRUDTests(OAuth2BaseTests):
def test_create_consumer(self):
consumer, data = self._create_consumer()
self.assertEqual(consumer['description'], data['description'])
self.assertIsNotNone(consumer['id'])
self.assertIsNotNone(consumer['name'])
self.assertIsNotNone(consumer['secret'])
# NOTE(garcianavalon) removed because owner field is removed
# self.assertEqual(self.user['id'], consumer['owner'])
def test_create_consumer_with_extra(self):
extra_data = {
'url': uuid.uuid4().hex,
'image': uuid.uuid4().hex
}
consumer, data = self._create_consumer(**extra_data)
self.assertEqual(consumer['description'], data['description'])
self.assertIsNotNone(consumer['id'])
self.assertIsNotNone(consumer['name'])
self.assertIsNotNone(consumer['secret'])
# NOTE(garcianavalon) removed because owner field is removed
# self.assertEqual(self.user['id'], consumer['owner'])
for k in extra_data:
self.assertEqual(extra_data[k], consumer[k])
def test_consumer_delete(self):
consumer, data = self._create_consumer()
consumer_id = consumer['id']
response = self.delete(self.CONSUMER_URL + '/%s' % consumer_id,
expected_status=204)
def test_consumer_delete_bad_id(self):
consumer, data = self._create_consumer()
consumer_id = uuid.uuid4().hex
response = self.delete(self.CONSUMER_URL + '/%s' % consumer_id,
expected_status=404)
def test_consumer_get(self):
consumer, data = self._create_consumer()
consumer_id = consumer['id']
response = self.get(self.CONSUMER_URL + '/%s' % consumer_id)
self_url = ['http://localhost/v3', self.CONSUMER_URL,
'/', consumer_id]
self_url = ''.join(self_url)
self.assertEqual(response.result['consumer']['links']['self'], self_url)
self.assertEqual(response.result['consumer']['id'], consumer_id)
def test_consumer_get_bad_id(self):
self.get(self.CONSUMER_URL + '/%(consumer_id)s'
% {'consumer_id': uuid.uuid4().hex},
expected_status=404)
def test_consumer_list(self):
self._create_consumer()
response = self.get(self.CONSUMER_URL)
entities = response.result['consumers']
self.assertIsNotNone(entities)
self_url = ['http://localhost/v3', self.CONSUMER_URL]
self_url = ''.join(self_url)
self.assertEqual(response.result['links']['self'], self_url)
self.assertValidListLinks(response.result['links'])
def test_consumer_update(self):
consumer, data = self._create_consumer()
original_id = consumer['id']
original_description = consumer['description'] or ''
update_description = original_description + '_new'
update_scopes = ['new_scopes']
update_redirect_uris = ['new_uris']
body = {
'consumer': {
'description': update_description,
'scopes': update_scopes,
'redirect_uris': update_redirect_uris
}
}
update_response = self.patch(self.CONSUMER_URL + '/%s' % original_id,
body=body)
consumer = update_response.result['consumer']
self.assertEqual(consumer['description'], update_description)
self.assertEqual(consumer['scopes'], update_scopes)
self.assertEqual(consumer['redirect_uris'], update_redirect_uris)
self.assertEqual(consumer['id'], original_id)
def test_consumer_update_bad_secret(self):
consumer, data = self._create_consumer()
original_id = consumer['id']
update_ref = copy.deepcopy(consumer)
update_ref['description'] = uuid.uuid4().hex
update_ref['secret'] = uuid.uuid4().hex
self.patch(self.CONSUMER_URL + '/%s' % original_id,
body={'consumer': update_ref},
expected_status=400)
def test_consumer_update_bad_id(self):
consumer, data = self._create_consumer()
original_id = consumer['id']
original_description = consumer['description'] or ''
update_description = original_description + "_new"
update_ref = copy.deepcopy(consumer)
update_ref['description'] = update_description
update_ref['id'] = uuid.uuid4().hex
self.patch(self.CONSUMER_URL + '/%s' % original_id,
body={'consumer': update_ref},
expected_status=400)
@dependency.requires('oauth2_api')
class AccessTokenEndpointTests(OAuth2BaseTests):
def new_access_token_ref(self, user_id, consumer_id):
token_ref = {
'id':uuid.uuid4().hex,
'consumer_id':consumer_id,
'authorizing_user_id':user_id,
'scopes': [uuid.uuid4().hex],
'expires_at':uuid.uuid4().hex,
}
return token_ref
def _create_access_token(self, user_id, consumer_id):
token_ref = self.new_access_token_ref(user_id, consumer_id)
access_token = self.oauth2_api.store_access_token(token_ref)
return access_token
def _list_access_tokens(self, user_id, expected_status=200):
url = self.USERS_URL.format(user_id=user_id) + self.ACCESS_TOKENS_URL
response = self.get(url, expected_status=expected_status)
return response.result['access_tokens']
def _get_access_token(self, user_id, token_id, expected_status=200):
url = (self.USERS_URL.format(user_id=user_id) + self.ACCESS_TOKENS_URL
+ '/{0}'.format(token_id))
response = self.get(url, expected_status=expected_status)
return response.result['access_token']
def _revoke_access_token(self, user_id, token_id, expected_status=204):
url = (self.USERS_URL.format(user_id=user_id) + self.ACCESS_TOKENS_URL
+ '/{0}'.format(token_id))
self.delete(url, expected_status=expected_status)
def test_list_access_tokens(self):
consumer_id = uuid.uuid4().hex
number_of_tokens = 2
access_tokens_reference = []
for i in range(number_of_tokens):
token = self._create_access_token(self.user['id'], consumer_id)
access_tokens_reference.append(token)
access_tokens = self._list_access_tokens(self.user['id'])
actual_tokens = set([t['id'] for t in access_tokens])
reference_tokens = set([t['id'] for t in access_tokens_reference])
self.assertEqual(actual_tokens, reference_tokens)
def test_get_access_token(self):
consumer_id = uuid.uuid4().hex
token = self._create_access_token(self.user['id'], consumer_id)
token = self._get_access_token(self.user['id'], token['id'])
# TODO(garcianavalon) access_token assertions
def test_revoke_access_token(self):
consumer_id = uuid.uuid4().hex
token = self._create_access_token(self.user['id'], consumer_id)
self._revoke_access_token(self.user['id'], token['id'])
actual_token = self._get_access_token(self.user['id'], token['id'])
self.assertEqual(actual_token['valid'], False)
# TODO(garcianavalon) test revoke identity api tokens
# TODO(garcianavalon) test can't get more identity api tokens
class OAuth2FlowBaseTests(OAuth2BaseTests):
def setUp(self):
super(OAuth2FlowBaseTests, self).setUp()
self.consumer, self.data = self._create_consumer()
def _flowstep_request_authorization(self, redirect_uri, scope,
expected_status=200, format_scope=True,
response_type='code', client_id=None):
if format_scope:
# Transform the array with the requested scopes into a list of
# space-delimited, case-sensitive strings as specified in RFC 6749
# http://tools.ietf.org/html/rfc6749#section-3.3
scope_string = ' '.join(scope)
else:
scope_string = scope
if not client_id:
client_id = self.consumer['id']
# NOTE(garcianavalon) we use a list of tuples to ensure param order
# in the query string to be able to mock it during testing.
credentials = [
('client_id', client_id),
('redirect_uri', redirect_uri),
('scope', scope_string),
('state', uuid.uuid4().hex)
]
if response_type:
credentials.append(('response_type', response_type))
query = urllib.urlencode(credentials)
authorization_url = '/OS-OAUTH2/authorize?%s' %query
# GET authorization_url to request the authorization
return self.get(authorization_url,
expected_status=expected_status)
def _flowstep_grant_authorization(self, response, scopes,
expected_status=302, **kwargs):
# POST authorization url to simulate ResourceOwner granting authorization
consumer_id = response.result['data']['consumer']['id']
data = {
"user_auth": {
"client_id":consumer_id,
"scopes":scopes
}
}
return self.post('/OS-OAUTH2/authorize',
body=data,
expected_status=expected_status,
**kwargs)
def _extract_header_query_string(self, response):
redirect_uri = response.headers['Location']
query_params = urlparse.parse_qs(urlparse.urlparse(redirect_uri).query)
return query_params
def _http_basic(self, consumer_id, consumer_secret):
auth_string = consumer_id + ':' + consumer_secret
return 'Basic ' + base64.b64encode(auth_string)
def _generate_urlencoded_request(self, authorization_code,
consumer_id, consumer_secret):
# NOTE(garcianavalon) No use for now, keystone only accepts JSON bodies
        body = ('grant_type=authorization_code&code=%s&redirect_uri=%s'
                % (authorization_code, self.DEFAULT_REDIRECT_URIS[0]))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': self._http_basic(consumer_id, consumer_secret)
}
return headers, body
def _generate_json_request(self, authorization_code, consumer_id, consumer_secret):
body = {
'token_request' : {
'grant_type':'authorization_code',
'code': authorization_code,
'redirect_uri':self.DEFAULT_REDIRECT_URIS[0]
}
}
headers = {
'Authorization': self._http_basic(consumer_id, consumer_secret)
}
return headers, body
def _extract_authorization_code_from_header(self, response):
query_params = self._extract_header_query_string(response)
authorization_code = query_params['code'][0]
return authorization_code
def _flowstep_obtain_access_token(self, response, expected_status=200):
authorization_code = self._extract_authorization_code_from_header(response)
consumer_id = self.consumer['id']
consumer_secret = self.consumer['secret']
headers, body = self._generate_json_request(authorization_code,
consumer_id, consumer_secret)
#POST to the token url
return self.post('/OS-OAUTH2/access_token', body=body,
headers=headers, expected_status=expected_status)
def _auth_body(self, access_token, project=None):
body = {
"auth": {
"identity": {
"methods": [
"oauth2"
],
"oauth2": {
'access_token_id':access_token['access_token']
},
}
}
}
if project:
body['auth']['scope'] = {
"project": {
"id": project
}
}
return body
def _assert_non_fatal_errors(self, response):
error = response.result['error']
self.assertIsNotNone(error['error'])
        if 'description' in error:
            self.assertIsNotNone(error['description'])
        if 'state' in error:
            self.assertIsNotNone(error['state'])
def _assert_access_token(self, response,
expected_scopes=None):
access_token = response.result
self.assertIsNotNone(access_token['access_token'])
self.assertIsNotNone(access_token['token_type'])
self.assertIsNotNone(access_token['expires_in'])
self.assertIsNotNone(access_token['refresh_token'])
scope = response.result['scope']
if not expected_scopes:
expected_scopes = ' '.join(self.DEFAULT_SCOPES)
self.assertEqual(scope, expected_scopes)
class OAuth2AuthorizationCodeFlowTests(OAuth2FlowBaseTests):
def test_flowstep_request_authorization(self):
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
expected_scopes = self.DEFAULT_SCOPES
response = self._flowstep_request_authorization(
scope=expected_scopes,
redirect_uri=expected_redirect_uri)
self.assertIsNotNone(response.result['data'])
data = response.result['data']
self.assertIsNotNone(data['redirect_uri'])
self.assertIsNotNone(data['requested_scopes'])
self.assertIsNotNone(data['consumer'])
self.assertIsNotNone(data['consumer']['id'])
consumer_id = data['consumer']['id']
self.assertEqual(consumer_id, self.consumer['id'])
self.assertEqual(data['requested_scopes'], expected_scopes)
self.assertEqual(data['redirect_uri'], expected_redirect_uri)
def test_flowstep_grant_authorization(self):
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
expected_scopes = self.DEFAULT_SCOPES
get_response = self._flowstep_request_authorization(
scope=expected_scopes,
redirect_uri=expected_redirect_uri)
response = self._flowstep_grant_authorization(get_response,
scopes=expected_scopes)
self.assertIsNotNone(response.headers['Location'])
query_params = self._extract_header_query_string(response)
self.assertIsNotNone(query_params['code'][0])
self.assertIsNotNone(query_params['state'][0])
def test_granting_authorization_by_different_user_fails(self):
""" Make the grant authorization step with a different
authenticated user to check the code is only granted to the
redirected user. The response should be a 404 Not Found because no
consumer has requested authorization for this user
"""
# TODO(garcianavalon) what if other consumer has requested the authorization
# for the second user???
        # First, request authorization for our user
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
expected_scopes = self.DEFAULT_SCOPES
get_response = self._flowstep_request_authorization(
scope=expected_scopes,
redirect_uri=expected_redirect_uri)
# create the other user
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain1['id']}
self.assignment_api.create_project(project1['id'], project1)
user_foo = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
password = user_foo['password']
user_foo = self.identity_api.create_user(user_foo)
user_foo['password'] = password
        # TODO(garcianavalon) I'm sure there is a better way to do this
roles = self.assignment_api.list_roles()
role_admin = next(r for r in roles if r['name'] == 'admin')
self.assignment_api.create_grant(
user_id=user_foo['id'],
project_id=project1['id'],
role_id=role_admin['id'])
# Get a scoped token for the project
auth_data = self.build_authentication_request(
username=user_foo['name'],
user_domain_id=test_v3.DEFAULT_DOMAIN_ID,
password=user_foo['password'],
project_name=project1['name'],
project_domain_id=domain1['id'])
# Try to grant authorization as the other user
response = self._flowstep_grant_authorization(get_response,
scopes=expected_scopes,
expected_status=404,
auth=auth_data)
def test_second_request_overrides_previous_credentials(self):
""" Simulate the use case where the user gets redirected a
second time by the same client.
"""
# First make two requests with different scopes
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
expected_scope1 = [self.DEFAULT_SCOPES[0]]
get_response1 = self._flowstep_request_authorization(
scope=expected_scope1,
redirect_uri=expected_redirect_uri)
scopes1 = get_response1.result['data']['requested_scopes']
self.assertEqual(scopes1, expected_scope1)
expected_scope2 = [self.DEFAULT_SCOPES[1]]
get_response2 = self._flowstep_request_authorization(
scope=expected_scope2,
redirect_uri=expected_redirect_uri)
scopes2 = get_response2.result['data']['requested_scopes']
self.assertEqual(scopes2, expected_scope2)
self.assertNotEqual(scopes2, scopes1)
# TODO(garcianavalon) without using states this test is stupid because
# the scopes returned in the response object are directly the ones in the
# request and they are not stored with the credentials. Therefore, when
        # the client grants authorization it doesn't matter which scopes were
# requested in the first place because they are not saved, permission is
# granted directly to the scopes in the POST request.
# Solutions possible: add support for states and/or store the requested
# scopes too
# Now try to grant authorization using the first credentials to verify
# it's not valid anymore
response1 = self._flowstep_grant_authorization(get_response1,
scopes=scopes1,
expected_status=302)
# Now grant authorization using the second credentials
response2 = self._flowstep_grant_authorization(get_response2,
scopes=scopes2,
expected_status=302)
def test_malformed_scopes_in_query(self):
""" Scope must be a list (string) of space-delimited, case-sensitive
strings. This is a non fatal error and the provider will
notify it in the response body
"""
malformed_scope = "&".join(self.DEFAULT_SCOPES)
response = self._flowstep_request_authorization(
redirect_uri=self.DEFAULT_REDIRECT_URIS[0],
scope=malformed_scope,
format_scope=False)
self._assert_non_fatal_errors(response)
def test_invalid_scopes_in_query(self):
""" The requested scope of access must be included in the registered
scopes of the client. This is a non fatal error and the provider will
notify it in the response body
        We ignore this value anyway (the scope granted in the end depends
        solely on the value submitted by the user in the grant authorization step)
        but this value is the one shown in the info presented to the resource owner,
        so it's good practice to check that the client is allowed that scope beforehand.
"""
new_scopes = [uuid.uuid4().hex]
response = self._flowstep_request_authorization(
redirect_uri=self.DEFAULT_REDIRECT_URIS[0],
scope=new_scopes)
self._assert_non_fatal_errors(response)
def test_invalid_response_type_in_query(self):
""" The response type must be set to 'code'. This is a non fatal error and
the provider will notify it in the response body
"""
response = self._flowstep_request_authorization(
redirect_uri=self.DEFAULT_REDIRECT_URIS[0],
scope=self.DEFAULT_SCOPES,
response_type=uuid.uuid4().hex)
self._assert_non_fatal_errors(response)
def test_missing_response_type_in_query(self):
""" The response type missing is a non fatal error and the provider will
notify it in the response body
"""
response = self._flowstep_request_authorization(
redirect_uri=self.DEFAULT_REDIRECT_URIS[0],
scope=self.DEFAULT_SCOPES,
response_type=None)
self._assert_non_fatal_errors(response)
def test_invalid_client_id_in_query(self):
""" The client_id must be provided and present in our backend."""
response = self._flowstep_request_authorization(
redirect_uri=self.DEFAULT_REDIRECT_URIS[0],
scope=self.DEFAULT_SCOPES,
client_id=uuid.uuid4().hex,
expected_status=404)
def test_granted_scope_is_the_one_submited_by_user(self):
""" Ensure that the scope we are going to give to the authorization code (and
        therefore to the access token) is the one submitted by the user and not
the one requested by the client.
"""
pass
class OAuth2AccessTokenFromCodeFlowTests(OAuth2FlowBaseTests):
def test_flowstep_obtain_access_token(self):
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
expected_scopes = self.DEFAULT_SCOPES
get_response = self._flowstep_request_authorization(
scope=expected_scopes,
redirect_uri=expected_redirect_uri)
post_response = self._flowstep_grant_authorization(
get_response, scopes=expected_scopes)
response = self._flowstep_obtain_access_token(post_response)
self._assert_access_token(
response, expected_scopes=' '.join(expected_scopes))
def test_access_code_only_one_use(self):
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
expected_scopes = self.DEFAULT_SCOPES
get_response = self._flowstep_request_authorization(
scope=expected_scopes,
redirect_uri=expected_redirect_uri)
post_response = self._flowstep_grant_authorization(get_response,
scopes=expected_scopes)
response_ok = self._flowstep_obtain_access_token(post_response,
expected_status=200)
response_not = self._flowstep_obtain_access_token(post_response,
expected_status=401)
def _exchange_access_token_assertions(self, response):
token = json.loads(response.body)['token']
#self.assertEqual(token['project']['id'],self.project_id)
self.assertEqual(token['user']['id'], self.user_id)
self.assertEqual(token['methods'], ["oauth2"])
self.assertIsNotNone(token['expires_at'])
def test_auth_with_access_token_no_scope(self):
scope = ['all_info']
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
get_response = self._flowstep_request_authorization(
scope=scope,
redirect_uri=expected_redirect_uri)
post_response = self._flowstep_grant_authorization(get_response,
scopes=scope)
response = self._flowstep_obtain_access_token(post_response)
access_token = response.result
body = self._auth_body(access_token)
# POST to the auth url as an unauthenticated user to get a keystone token
response = self.post('/auth/tokens', body=body, noauth=True)
self._exchange_access_token_assertions(response)
def test_auth_with_access_token_with_scope(self):
scope = ['all_info']
expected_redirect_uri = self.DEFAULT_REDIRECT_URIS[0]
get_response = self._flowstep_request_authorization(
scope=scope,
redirect_uri=expected_redirect_uri)
post_response = self._flowstep_grant_authorization(get_response,
scopes=scope)
response = self._flowstep_obtain_access_token(post_response)
access_token = response.result
body = self._auth_body(access_token, project=self.project_id)
# POST to the auth url as an unauthenticated user to get a keystone token
response = self.post('/auth/tokens', body=body)
self._exchange_access_token_assertions(response)
class OAuth2PasswordGrantFlowTests(OAuth2FlowBaseTests):
    # NOTE(garcianavalon) because right now we can't send
# a domain id in the Password Grant, we need to use the
# default_domain_user or the validator will fail
def _assert_keystone_token(self, response):
token = json.loads(response.body)['token']
#self.assertEqual(token['project']['id'],self.project_id)
self.assertEqual(token['user']['id'],
self.default_domain_user['id'])
self.assertEqual(token['methods'], ["oauth2"])
self.assertIsNotNone(token['expires_at'])
def _generate_urlencoded_request(self):
# NOTE(garcianavalon) in order to use this content type the
# UrlencodedBodyMiddleware provided in the extension must be
# in the pipeline
body = ('grant_type=password&username={username}'
'&password={password}').format(
username=self.default_domain_user['name'],
password=self.default_domain_user['password'])
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': self._http_basic(
self.consumer['id'], self.consumer['secret'])
}
return headers, body
def _generate_json_request(self, scope=None):
# NOTE(garcianavalon) this is non-compliant with the
# rfc6749 spec. Used when the UrlencodedBodyMiddleware
# is not available in a keystone deployment
body = {
'token_request' : {
'grant_type':'password',
'username': self.default_domain_user['name'],
'password': self.default_domain_user['password'],
}
}
if scope:
body['token_request']['scope'] = scope
headers = {
'Authorization': self._http_basic(
self.consumer['id'], self.consumer['secret'])
}
return headers, body
def _access_token_request(self, scope=None, expected_status=200):
headers, body = self._generate_json_request(scope=scope)
return self.post('/OS-OAUTH2/access_token', body=body,
headers=headers, expected_status=expected_status)
def _obtain_keystone_token(self, body):
# POST as an unauthenticated user to get a keystone token
return self.post('/auth/tokens', body=body, noauth=True)
def test(self):
scope = 'all_info'
response = self._access_token_request(scope=scope)
self._assert_access_token(response,
expected_scopes=scope)
def test_auth_with_access_token(self):
scope = 'all_info'
at_response = self._access_token_request(scope=scope)
body = self._auth_body(at_response.result)
kt_response = self._obtain_keystone_token(body=body)
self._assert_keystone_token(kt_response)
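# A hedged end-to-end recap of the authorization code flow exercised above,
# written with this file's own helpers (self is an OAuth2FlowBaseTests
# instance; scopes and redirect_uri come from the registered consumer):
#
#   get_resp = self._flowstep_request_authorization(
#       scope=self.DEFAULT_SCOPES,
#       redirect_uri=self.DEFAULT_REDIRECT_URIS[0])    # GET /OS-OAUTH2/authorize
#   post_resp = self._flowstep_grant_authorization(
#       get_resp, scopes=self.DEFAULT_SCOPES)          # POST -> 302 with ?code=...
#   token_resp = self._flowstep_obtain_access_token(post_resp)
#   body = self._auth_body(token_resp.result)
#   self.post('/auth/tokens', body=body, noauth=True)  # keystone token exchange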
| ging/keystone | keystone/tests/test_v3_oauth2.py | Python | apache-2.0 | 31,537 |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from ..transforms import bbox_xyxy_to_cxcywh
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
"""Uniform Matching between the anchors and gt boxes, which can achieve
balance in positive anchors, and gt_bboxes_ignore was not considered for
now.
Args:
pos_ignore_thr (float): the threshold to ignore positive anchors
neg_ignore_thr (float): the threshold to ignore negative anchors
match_times(int): Number of positive anchors for each gt box.
Default 4.
iou_calculator (dict): iou_calculator config
"""
def __init__(self,
pos_ignore_thr,
neg_ignore_thr,
match_times=4,
iou_calculator=dict(type='BboxOverlaps2D')):
self.match_times = match_times
self.pos_ignore_thr = pos_ignore_thr
self.neg_ignore_thr = neg_ignore_thr
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self,
bbox_pred,
anchor,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
        # 1. assign 0 (background) to all bboxes by default
assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
0,
dtype=torch.long)
assigned_labels = bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
if num_gts == 0:
# No ground truth, assign all to background
assigned_gt_inds[:] = 0
assign_result = AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
assign_result.set_extra_property(
'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
assign_result.set_extra_property('pos_predicted_boxes',
bbox_pred.new_empty((0, 4)))
assign_result.set_extra_property('target_boxes',
bbox_pred.new_empty((0, 4)))
return assign_result
# 2. Compute the L1 cost between boxes
# Note that we use anchors and predict boxes both
cost_bbox = torch.cdist(
bbox_xyxy_to_cxcywh(bbox_pred),
bbox_xyxy_to_cxcywh(gt_bboxes),
p=1)
cost_bbox_anchors = torch.cdist(
bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
# We found that topk function has different results in cpu and
# cuda mode. In order to ensure consistency with the source code,
# we also use cpu mode.
# TODO: Check whether the performance of cpu and cuda are the same.
C = cost_bbox.cpu()
C1 = cost_bbox_anchors.cpu()
# self.match_times x n
index = torch.topk(
C, # c=b,n,x c[i]=n,x
k=self.match_times,
dim=0,
largest=False)[1]
# self.match_times x n
index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
# (self.match_times*2) x n
indexes = torch.cat((index, index1),
dim=1).reshape(-1).to(bbox_pred.device)
pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
pred_max_overlaps, _ = pred_overlaps.max(dim=1)
anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)
# 3. Compute the ignore indexes use gt_bboxes and predict boxes
ignore_idx = pred_max_overlaps > self.neg_ignore_thr
assigned_gt_inds[ignore_idx] = -1
# 4. Compute the ignore indexes of positive sample use anchors
# and predict boxes
pos_gt_index = torch.arange(
0, C1.size(1),
device=bbox_pred.device).repeat(self.match_times * 2)
pos_ious = anchor_overlaps[indexes, pos_gt_index]
pos_ignore_idx = pos_ious < self.pos_ignore_thr
pos_gt_index_with_ignore = pos_gt_index + 1
pos_gt_index_with_ignore[pos_ignore_idx] = -1
assigned_gt_inds[indexes] = pos_gt_index_with_ignore
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
assign_result = AssignResult(
num_gts,
assigned_gt_inds,
anchor_max_overlaps,
labels=assigned_labels)
assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)
assign_result.set_extra_property('pos_predicted_boxes',
bbox_pred[indexes])
assign_result.set_extra_property('target_boxes',
gt_bboxes[pos_gt_index])
return assign_result
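# A minimal, hedged usage sketch (not part of mmdet): it builds the assigner
# with illustrative thresholds and feeds it random boxes in xyxy format,
# matching the shapes assign() expects above.
def _example_uniform_assign():
    assigner = UniformAssigner(
        pos_ignore_thr=0.15, neg_ignore_thr=0.7, match_times=4)
    # cumsum over the last dim makes the 4 values increase, so x1 <= x2, y1 <= y2
    bbox_pred = torch.rand(100, 4).cumsum(-1)  # predicted boxes, xyxy
    anchor = torch.rand(100, 4).cumsum(-1)     # anchor boxes, xyxy
    gt_bboxes = torch.rand(5, 4).cumsum(-1)    # ground-truth boxes, xyxy
    gt_labels = torch.randint(0, 80, (5,))     # ground-truth class indices
    return assigner.assign(bbox_pred, anchor, gt_bboxes, gt_labels=gt_labels)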
| open-mmlab/mmdetection | mmdet/core/bbox/assigners/uniform_assigner.py | Python | apache-2.0 | 5,556 |
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1234)
fig, ax = plt.subplots(1)
x = 30*np.random.randn(10000)
mu = x.mean()
median = np.median(x)
sigma = x.std()
textstr = '\n'.join((r'$\mu=%.2f$' % mu,
                     r'$\mathrm{median}=%.2f$' % median,
                     r'$\sigma=%.2f$' % sigma))
ax.hist(x, 50)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
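# A minimal follow-up so the recipe can also be run as a standalone script
# (the docs build normally renders the figure on its own).
plt.show()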
| leesavide/pythonista-docs | Documentation/matplotlib/users/recipes-8.py | Python | apache-2.0 | 493 |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.ossrh_publication_metadata import (
Developer,
License,
OSSRHPublicationMetadata,
Scm,
)
from pants.backend.jvm.repository import Repository as repo
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.subsystems.jar_dependency_management import JarDependencyManagementSetup
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.scoverage_platform import ScoveragePlatform
from pants.backend.jvm.subsystems.shader import Shading
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.benchmark import Benchmark
from pants.backend.jvm.targets.credentials import LiteralCredentials, NetrcCredentials
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.targets.junit_tests import JUnitTests
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import Duplicate, JarRules, JvmBinary, Skip
from pants.backend.jvm.targets.jvm_prep_command import JvmPrepCommand
from pants.backend.jvm.targets.managed_jar_dependencies import (
ManagedJarDependencies,
ManagedJarLibraries,
)
from pants.backend.jvm.targets.scala_exclude import ScalaExclude
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.jvm.tasks.analysis_extraction import AnalysisExtraction
from pants.backend.jvm.tasks.benchmark_run import BenchmarkRun
from pants.backend.jvm.tasks.binary_create import BinaryCreate
from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools
from pants.backend.jvm.tasks.bundle_create import BundleCreate
from pants.backend.jvm.tasks.check_published_deps import CheckPublishedDeps
from pants.backend.jvm.tasks.checkstyle import Checkstyle
from pants.backend.jvm.tasks.classmap import ClassmapTask
from pants.backend.jvm.tasks.consolidate_classpath import ConsolidateClasspath
from pants.backend.jvm.tasks.coursier_resolve import CoursierResolve
from pants.backend.jvm.tasks.detect_duplicates import DuplicateDetector
from pants.backend.jvm.tasks.ivy_imports import IvyImports
from pants.backend.jvm.tasks.ivy_outdated import IvyOutdated
from pants.backend.jvm.tasks.jar_create import JarCreate
from pants.backend.jvm.tasks.jar_publish import JarPublish
from pants.backend.jvm.tasks.javadoc_gen import JavadocGen
from pants.backend.jvm.tasks.junit_run import JUnitRun
from pants.backend.jvm.tasks.jvm_compile.javac.javac_compile import JavacCompile
from pants.backend.jvm.tasks.jvm_compile.jvm_classpath_publisher import RuntimeClasspathPublisher
from pants.backend.jvm.tasks.jvm_compile.rsc.rsc_compile import RscCompile
from pants.backend.jvm.tasks.jvm_dependency_check import JvmDependencyCheck
from pants.backend.jvm.tasks.jvm_dependency_usage import JvmDependencyUsage
from pants.backend.jvm.tasks.jvm_platform_analysis import JvmPlatformExplain, JvmPlatformValidate
from pants.backend.jvm.tasks.jvm_run import JvmRun
from pants.backend.jvm.tasks.nailgun_task import NailgunKillall
from pants.backend.jvm.tasks.prepare_resources import PrepareResources
from pants.backend.jvm.tasks.prepare_services import PrepareServices
from pants.backend.jvm.tasks.provide_tools_jar import ProvideToolsJar
from pants.backend.jvm.tasks.run_jvm_prep_command import (
RunBinaryJvmPrepCommand,
RunCompileJvmPrepCommand,
RunTestJvmPrepCommand,
)
from pants.backend.jvm.tasks.scala_repl import ScalaRepl
from pants.backend.jvm.tasks.scaladoc_gen import ScaladocGen
from pants.backend.jvm.tasks.scalafix_task import ScalaFixCheck, ScalaFixFix
from pants.backend.jvm.tasks.scalafmt_task import ScalaFmtCheckFormat, ScalaFmtFormat
from pants.backend.jvm.tasks.scalastyle_task import ScalastyleTask
from pants.backend.jvm.tasks.unpack_jars import UnpackJars
from pants.backend.project_info.tasks.export_dep_as_jar import ExportDepAsJar
from pants.build_graph.app_base import Bundle, DirectoryReMapper
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar as task
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependencyParseContextWrapper
def build_file_aliases():
return BuildFileAliases(
targets={
"annotation_processor": AnnotationProcessor,
"benchmark": Benchmark,
"credentials": LiteralCredentials,
"jar_library": JarLibrary,
"java_agent": JavaAgent,
"java_library": JavaLibrary,
"javac_plugin": JavacPlugin,
"junit_tests": JUnitTests,
"jvm_app": JvmApp,
"jvm_binary": JvmBinary,
"jvm_prep_command": JvmPrepCommand,
"managed_jar_dependencies": ManagedJarDependencies,
"netrc_credentials": NetrcCredentials,
"scala_library": ScalaLibrary,
"scalac_plugin": ScalacPlugin,
"unpacked_jars": UnpackedJars,
},
objects={
"artifact": Artifact,
"scala_artifact": ScalaArtifact,
"ossrh": OSSRHPublicationMetadata,
"license": License,
"scm": Scm,
"developer": Developer,
"github": Scm.github,
"DirectoryReMapper": DirectoryReMapper,
"Duplicate": Duplicate,
"exclude": Exclude,
"scala_jar": ScalaJarDependency,
"scala_exclude": ScalaExclude,
"jar_rules": JarRules,
"repository": repo,
"Skip": Skip,
"shading_relocate": Shading.create_relocate,
"shading_exclude": Shading.create_exclude,
"shading_keep": Shading.create_keep,
"shading_zap": Shading.create_zap,
"shading_relocate_package": Shading.create_relocate_package,
"shading_exclude_package": Shading.create_exclude_package,
"shading_keep_package": Shading.create_keep_package,
"shading_zap_package": Shading.create_zap_package,
},
context_aware_object_factories={
"bundle": Bundle,
"jar": JarDependencyParseContextWrapper,
"managed_jar_libraries": ManagedJarLibraries,
},
)
def global_subsystems():
return (
ScalaPlatform,
ScoveragePlatform,
)
# TODO https://github.com/pantsbuild/pants/issues/604 register_goals
def register_goals():
ng_killall = task(name="ng-killall", action=NailgunKillall)
ng_killall.install()
Goal.by_name("invalidate").install(ng_killall, first=True)
Goal.by_name("clean-all").install(ng_killall, first=True)
task(name="jar-dependency-management", action=JarDependencyManagementSetup).install("bootstrap")
task(name="jvm-platform-explain", action=JvmPlatformExplain).install("jvm-platform-explain")
task(name="jvm-platform-validate", action=JvmPlatformValidate).install("jvm-platform-validate")
task(name="bootstrap-jvm-tools", action=BootstrapJvmTools).install("bootstrap")
task(name="provide-tools-jar", action=ProvideToolsJar).install("bootstrap")
# Compile
task(name="rsc", action=RscCompile).install("compile")
task(name="javac", action=JavacCompile).install("compile")
# Analysis extraction.
task(name="zinc", action=AnalysisExtraction).install("analysis")
# Dependency resolution.
task(name="coursier", action=CoursierResolve).install("resolve")
task(name="ivy-imports", action=IvyImports).install("imports")
task(name="unpack-jars", action=UnpackJars).install()
task(name="ivy", action=IvyOutdated).install("outdated")
# Resource preparation.
task(name="prepare", action=PrepareResources).install("resources")
task(name="services", action=PrepareServices).install("resources")
task(name="export-classpath", action=RuntimeClasspathPublisher).install()
# This goal affects the contents of the runtime_classpath, and should not be
# combined with any other goals on the command line.
task(name="export-dep-as-jar", action=ExportDepAsJar).install()
task(name="jvm", action=JvmDependencyUsage).install("dep-usage")
task(name="classmap", action=ClassmapTask).install("classmap")
# Generate documentation.
task(name="javadoc", action=JavadocGen).install("doc")
task(name="scaladoc", action=ScaladocGen).install("doc")
# Bundling.
task(name="create", action=JarCreate).install("jar")
detect_duplicates = task(name="dup", action=DuplicateDetector)
task(name="jvm", action=BinaryCreate).install("binary")
detect_duplicates.install("binary")
task(name="consolidate-classpath", action=ConsolidateClasspath).install("bundle")
task(name="jvm", action=BundleCreate).install("bundle")
detect_duplicates.install("bundle")
task(name="detect-duplicates", action=DuplicateDetector).install()
# Publishing.
task(name="check-published-deps", action=CheckPublishedDeps).install("check-published-deps")
task(name="jar", action=JarPublish).install("publish")
# Testing.
task(name="junit", action=JUnitRun).install("test")
task(name="bench", action=BenchmarkRun).install("bench")
# Linting.
task(name="scalafix", action=ScalaFixCheck).install("lint")
task(name="scalafmt", action=ScalaFmtCheckFormat, serialize=False).install("lint")
task(name="scalastyle", action=ScalastyleTask, serialize=False).install("lint")
task(name="checkstyle", action=Checkstyle, serialize=False).install("lint")
task(name="jvm-dep-check", action=JvmDependencyCheck, serialize=False).install("lint")
# Formatting.
# Scalafix has to go before scalafmt in order not to
# further change Scala files after scalafmt.
task(name="scalafix", action=ScalaFixFix).install("fmt")
task(name="scalafmt", action=ScalaFmtFormat, serialize=False).install("fmt")
# Running.
task(name="jvm", action=JvmRun, serialize=False).install("run")
task(name="jvm-dirty", action=JvmRun, serialize=False).install("run-dirty")
task(name="scala", action=ScalaRepl, serialize=False).install("repl")
task(name="scala-dirty", action=ScalaRepl, serialize=False).install("repl-dirty")
task(name="test-jvm-prep-command", action=RunTestJvmPrepCommand).install("test", first=True)
task(name="binary-jvm-prep-command", action=RunBinaryJvmPrepCommand).install(
"binary", first=True
)
task(name="compile-jvm-prep-command", action=RunCompileJvmPrepCommand).install(
"compile", first=True
)
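# A hedged illustration of what the aliases registered in build_file_aliases()
# enable in a BUILD file; the target names, globs and coordinates below are
# purely hypothetical:
#
#   jar_library(
#       name='example-jars',
#       jars=[jar(org='com.example', name='example', rev='1.0.0')],
#   )
#
#   java_library(
#       name='example-lib',
#       sources=['*.java'],
#       dependencies=[':example-jars'],
#   )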
| wisechengyi/pants | src/python/pants/backend/jvm/register.py | Python | apache-2.0 | 11,163 |