| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a list of places.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
#-------------------------------------------------------------------------
#
# PlaceListOption class
#
#-------------------------------------------------------------------------
class PlaceListOption(Option):
"""
This class describes a widget that allows multiple places from the
database to be selected.
"""
def __init__(self, label):
"""
:param label: A label to be applied to this option.
Example: "Places"
:type label: string
:param value: A set of GIDs as initial values for this option.
Example: "111 222 333 444"
:type value: string
:return: nothing
"""
Option.__init__(self, label, "")
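# --- Illustrative usage sketch (not part of the original file) ---
# A report would normally create this option and register it with its
# options menu; Menu.add_option() and the "Report Options" category are
# assumptions based on the wider Gramps plug-in API, not this module.
#
# places = PlaceListOption(_("Places"))                 # label shown in the dialog
# menu.add_option(_("Report Options"), "places", places)
# gids = places.get_value().split()                     # e.g. ["111", "222", "333"]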
| pmghalvorsen/gramps_branch | gramps/gen/plug/menu/_placelist.py | Python | gpl-2.0 | 1,794 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django import VERSION
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SignalDeferredDispatch'
db.create_table(u'dbmail_signaldeferreddispatch', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('args', self.gf('django.db.models.fields.TextField')()),
('kwargs', self.gf('django.db.models.fields.TextField')()),
('params', self.gf('django.db.models.fields.TextField')()),
('eta', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('done', self.gf('django.db.models.fields.NullBooleanField')(default=None, null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'dbmail', ['SignalDeferredDispatch'])
# Adding index on 'SignalDeferredDispatch', fields ['eta', 'done']
db.create_index(u'dbmail_signaldeferreddispatch', ['eta', 'done'])
def backwards(self, orm):
# Removing index on 'SignalDeferredDispatch', fields ['eta', 'done']
db.delete_index(u'dbmail_signaldeferreddispatch', ['eta', 'done'])
# Deleting model 'SignalDeferredDispatch'
db.delete_table(u'dbmail_signaldeferreddispatch')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dbmail.apikey': {
'Meta': {'object_name': 'ApiKey'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.mailbcc': {
'Meta': {'object_name': 'MailBcc'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.mailcategory': {
'Meta': {'object_name': 'MailCategory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.mailfile': {
'Meta': {'object_name': 'MailFile'},
'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': u"orm['dbmail.MailTemplate']"})
},
u'dbmail.mailfromemail': {
'Meta': {'object_name': 'MailFromEmail'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['dbmail.MailFromEmailCredential']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.mailfromemailcredential': {
'Meta': {'object_name': 'MailFromEmailCredential'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fail_silently': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_tls': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'dbmail.mailgroup': {
'Meta': {'object_name': 'MailGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.mailgroupemail': {
'Meta': {'unique_together': "(('email', 'group'),)", 'object_name': 'MailGroupEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emails'", 'to': u"orm['dbmail.MailGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dbmail.maillog': {
'Meta': {'object_name': 'MailLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error_exception': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbmail.MailLogException']", 'null': 'True', 'blank': 'True'}),
'error_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sent': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'num_of_retries': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbmail.MailTemplate']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'dbmail.maillogemail': {
'Meta': {'object_name': 'MailLogEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbmail.MailLog']"}),
'mail_type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
u'dbmail.maillogexception': {
'Meta': {'object_name': 'MailLogException'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'})
},
u'dbmail.mailtemplate': {
'Meta': {'object_name': 'MailTemplate'},
'bcc_email': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['dbmail.MailBcc']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['dbmail.MailCategory']", 'null': 'True', 'blank': 'True'}),
'context_note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enable_log': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'from_email': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['dbmail.MailFromEmail']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_html': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'num_of_retries': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '6'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.signal': {
'Meta': {'object_name': 'Signal'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbmail.MailGroup']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'receive_once': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'default': "'{{ instance.email }}'", 'null': 'True', 'blank': 'True'}),
'signal': ('django.db.models.fields.CharField', [], {'default': "'post_save'", 'max_length': '15'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbmail.MailTemplate']"}),
'update_model': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbmail.signaldeferreddispatch': {
#'Meta': {'object_name': 'SignalDeferredDispatch', 'index_together': "(('eta', 'done'),)"},
'args': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'done': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'eta': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kwargs': ('django.db.models.fields.TextField', [], {}),
'params': ('django.db.models.fields.TextField', [], {})
},
u'dbmail.signallog': {
'Meta': {'object_name': 'SignalLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'model_pk': ('django.db.models.fields.BigIntegerField', [], {}),
'signal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbmail.Signal']"})
}
}
if VERSION >= (1, 5):
models[u'dbmail.signaldeferreddispatch']['Meta'] = {'object_name': 'SignalDeferredDispatch', 'index_together': "(('eta', 'done'),)"}
else:
models[u'dbmail.signaldeferreddispatch']['Meta'] = {'object_name': 'SignalDeferredDispatch'}
complete_apps = ['dbmail']
| ilstreltsov/django-db-mailer | dbmail/south_migrations/0032_auto__add_signaldeferreddispatch__add_index_signaldeferreddispatch_eta.py | Python | gpl-2.0 | 16,851 |
"""
The OpenMP package contains all code templates required for the OpenMP
code generation in ANNarchy.
BaseTemplates:
defines the basic definitions common to all sparse matrix formats, e.g. projection header
[FORMAT]_SingleThread:
defines the format-specific definitions for the currently available formats:
* LIL: list-in-list
* COO: coordinate
* CSR: compressed sparse row
* ELL: ELLPACK/ITPACK
* ELL-R: ELLPACK format with row-length array
* Dense: a full matrix representation
There are some special-purpose implementations:
* CSR_T: compressed sparse row (transposed)
* LIL_P: a partitioned LIL representation
"""
from . import LIL as LIL_OpenMP
from . import LIL_P as LIL_Sliced_OpenMP
from . import COO as COO_OpenMP
from . import CSR as CSR_OpenMP
from . import CSR_T as CSR_T_OpenMP
from . import CSR_T_P as CSR_T_Sliced_OpenMP
from . import ELL as ELL_OpenMP
from . import ELLR as ELLR_OpenMP
from . import Dense as Dense_OpenMP
__all__ = ["BaseTemplates", "LIL_OpenMP", "LIL_Sliced_OpenMP", "COO_OpenMP", "CSR_OpenMP", "CSR_T_OpenMP", "CSR_T_Sliced_OpenMP", "ELL_OpenMP", "ELLR_OpenMP", "Dense_OpenMP"]
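# --- Illustrative sketch (not part of ANNarchy) ---
# Plain-Python view of two of the formats listed in the docstring above,
# using a small 3x4 connectivity matrix; no ANNarchy classes are involved.
dense = [
    [0.0, 1.5, 0.0, 2.0],
    [0.0, 0.0, 0.0, 0.0],
    [3.0, 0.0, 0.5, 0.0],
]
# LIL: one list of (column, value) pairs per row
lil = [[(c, v) for c, v in enumerate(row) if v != 0.0] for row in dense]
# CSR: three flat arrays - row pointers, column indices, values
row_ptr, col_idx, values = [0], [], []
for row in dense:
    for c, v in enumerate(row):
        if v != 0.0:
            col_idx.append(c)
            values.append(v)
    row_ptr.append(len(col_idx))
# row_ptr == [0, 2, 2, 4], col_idx == [1, 3, 0, 2], values == [1.5, 2.0, 3.0, 0.5]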
| vitay/ANNarchy | ANNarchy/generator/Projection/OpenMP/__init__.py | Python | gpl-2.0 | 1,202 |
import urllib.request
import time
preço = 99.99 # some larger starting value
while preço >= 4.74:
pagina = urllib.request.urlopen(
'http://beans.itcarlow.ie/prices-loyalty.html')
texto = pagina.read().decode('utf8')
onde = texto.find('>$')
início = onde + 2
fim = início + 4
preço = float(texto[início:fim])
if preço >= 4.74:
print ('Waiting...')
time.sleep(600)
print ('Buy! Price: %5.2f' % preço)
| wsricardo/mcestudos | treinamento-webScraping/Abraji/p11.py | Python | gpl-3.0 | 468 |
# Copyright (c) 2003-2015 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
import pcapy
import socket
import time
from random import randint
from mitmflib.impacket import structure, ImpactDecoder
class BootpPacket(structure.Structure):
commonHdr = (
('op','b'),
('htype','b=1'), # 1 = Ether
('hlen','b=len(chaddr)'),
('hops','b=0'),
('xid','!L=0'),
('secs','!H=0'),
('flags','!H=0'),
('ciaddr','!L=0'),
('yiaddr','!L=0'),
('siaddr','!L=0'),
('giaddr','!L=0'),
('_chaddr','16s=chaddr'),
('chaddr','_','_chaddr[:hlen]'),
('sname','64s=""'),
('file','128s=""'))
#def __init__(self, data = None, alignment = 0):
# structure.Structure.__init__(self, data, alignment)
class DhcpPacket(BootpPacket):
# DHCP: http://www.faqs.org/rfcs/rfc2131.html
# DHCP Options: http://www.faqs.org/rfcs/rfc1533.html
# good list of options: http://www.networksorcery.com/enp/protocol/bootp/options.htm
BOOTREQUEST = 1
BOOTREPLY = 2
DHCPDISCOVER= 1
DHCPOFFER = 2
DHCPREQUEST = 3
DHCPDECLINE = 4
DHCPACK = 5
DHCPNAK = 6
DHCPRELEASE = 7
DHCPINFORM = 8
options = {
# 3. Vendor Extensions
'pad':(0,'_'),
'subnet-mask':(1,'!L'),
'time-offset':(2,'!L'),
'router':(3,'*!L'),
'time-server':(4,'*!L'),
'name-server':(5,'*!L'),
'domain-name-server':(6,'*!L'),
'log-server':(7,'*!L'),
'cookie-server':(8,'*!L'),
'lpr-server':(9,'*!L'),
'impress-server':(10,'*!L'),
'resource-locator-server':(11,'*!L'),
'host-name':(12,':'),
'boot-file-size':(13,'!H'),
'merit-dump-file':(14,':'),
'domain-name':(15,':'),
'swap-server':(16,':'),
'root-path':(17,':'),
'extensions-path':(18,':'),
# 4. IP Layer Parameters per Host
'ip-forwarding':(19,'B'),
'non-local-source-routing':(20,'B'),
'policy-filter':(21,'*!L'),
'maximum-datagram-reassembly-size':(22,'!H'),
'default-ip-ttl':(23,'B'),
'path-mtu-aging-timeout':(24,'!L'),
'path-mtu-plateau-table':(25,'*!H'),
# 5. IP Layer Parameters per Interface
'interface-mtu':(26,'!H'),
'all-subnets-are-local':(27,'B'),
'broadcast-address':(28,'!L'),
'perform-mask-discovery':(29,'B'),
'mask-supplier':(30,'B'),
'perform-router-discovery':(31,'B'),
'router-solicitation-address':(32,'!L'),
'static-route':(33,'*!L'),
# 6. Link Layer Parameters per Interface
'trailer-encapsulation':(34,'B'),
'arp-cache-timeout':(35,'!L'),
'ethernet-encapsulation':(36,'B'),
# 7. TCP parameters
'tcp-default-ttl':(37,'B'),
'tcp-keepalive-interval':(38,'!L'),
'tcp-keepalive-garbage':(39,'B'),
# 8. Application and Service parameters
'nis-domain':(40,':'),
'nis-servers':(41,'*!L'),
'ntp-servers':(42,'*!L'),
'vendor-specific':(43,':'),
'netbios-name-server':(44,'*!L'),
'netbios-datagrame-distribution-server':(45,'*!L'),
'netbios-node-type':(46,'B'),
'netbios-scope':(47,':'),
'x11-font-server':(48,'*!L'),
'x11-display-manager':(49,'*!L'),
# 9. DHCP Extensions
'requested-ip':(50,'!L'),
'lease-time':(51,'!L'),
'option-overload':(52,'B'),
'message-type':(53,'B'),
'server-id':(54,'!L'),
'parameter-request-list':(55,':'),
'message':(56,':'),
'maximum-dhcp-message-size':(57,'!H'),
'renewal-time':(58,'!L'),
'rebinding-time':(59,'!L'),
'vendor-class':(60,':'),
'client-id':(61,':'),
# other non-rfc1533 options
'slp-directory-agent':(78,':'), # http://www.ietf.org/rfc/rfc2610.txt
'slp-service-scope':(79,':'), # http://www.ietf.org/rfc/rfc2610.txt
'fully-qualified-domain-name':(81,':'), # http://www.ietf.org/rfc/rfc4702.txt
'auto-configuration':(116,'B'), # http://www.ietf.org/rfc/rfc2563.txt
'domain-search-list':(119,'B'), # http://www.ietf.org/rfc/rfc3397.txt
'classless-route-121':(121, ':'), # http://www.ietf.org/rfc/rfc3442.txt
'classless-route-249':(249, ':'), # http://support.microsoft.com/kb/121005
'proxy-autoconfig':(252,':'),
'eof':(255,'_'),
}
structure = (
('cookie','!L'),
('_options',':=self.packOptions(options)'),
('options','_','self.unpackOptions(_options)'))
#def __init__(self, data = None, alignment = 0):
# BootpPacket.__init__(self, data, alignment)
def packOptions(self, options):
# options is an array of tuples: ('name',value)
answer = ''
for name, value in options:
code,format = self.options[name]
val = self.pack(format, value)
answer += '%c%c%s' % (code, len(val), val)
return answer
def getOptionNameAndFormat(self, optionCode):
for k in self.options:
code,format = self.options[k]
if code == optionCode: return k, format
return optionCode, ':'
def unpackOptions(self, options):
# options is a string
# print '%r' % options
answer = []
i = 0
while i < len(options)-1:
name, format = self.getOptionNameAndFormat(ord(options[i]))
# size = self.calcUnpackSize(format, options[i+1:])
size = ord(options[i+1])
# print i, name, format, size
value = self.unpack(format, options[i+2:i+2+size])
answer.append((name, value))
i += 2+size
return answer
def unpackParameterRequestList(self, options):
return [self.getOptionNameAndFormat(ord(opt))[0] for opt in options]
def isAskingForProxyAutodiscovery(self):
for opt in self.fields['options']:
if opt[0] == 'parameter-request-list':
for optCode in opt[1]:
if ord(optCode) == 252:
return True
return False
def getOptionValue(self, name):
for opt in self.fields['options']:
if opt[0] == name:
return opt[1]
return None
class DHCPTool:
def initialize(self):
self.pcap = pcapy.open_live(pcapy.lookupdev(), -1, 1, 1)
self.pcap.setfilter("port 67", 1, 0xffffff00)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.connect(('192.168.1.1',67))
self.decoder = ImpactDecoder.EthDecoder()
def targetRun(self):
for i in range(1,254):
self.sendDISCOVER('12345%c' % i, ip = '192.168.1.%d' % i)
self.processPacketsForOneSecond()
def finalize(self):
self.pcap.close()
Module.finalize(self)
def processPacketsForOneSecond(self):
t = time.time()
while time.time()-t < 1:
p = self.pcap.next()
if p[1][2]:
pp = self.decoder.decode(p[0])
print pp
def sendDHCP(self, type, chaddr, hostname = None, ip = None, xid = None,opts = []):
p = DhcpPacket()
opt = [('message-type',type)] + list(opts)
if xid is None:
xid = randint(0,0xffffffff)
if ip:
ip = structure.unpack('!L',socket.inet_aton(ip))[0]
p['ciaddr'] = ip
opt.append(('requested-ip',ip))
if hostname is not None:
for i in range(0,len(hostname),255):
opt.append(('host-name',hostname[i:i+255]))
p['op'] = p.BOOTREQUEST
p['xid'] = xid
p['chaddr'] = chaddr
p['cookie'] = 0x63825363
p['options'] = opt
self.sock.send(str(p))
def sendDISCOVER(self, chaddr, hostname = None, ip = None,xid = 0x12345678):
print 'DHCPDISCOVER: %s' % ip
self.sendDHCP(DhcpPacket.DHCPDISCOVER, chaddr, hostname, ip, xid)
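# --- Illustrative usage sketch (not part of the original module) ---
# Only methods defined above are used; initialize() opens a live pcap
# handle and a UDP socket towards 192.168.1.1:67, so running this needs
# the matching network setup and privileges.
#
# tool = DHCPTool()
# tool.initialize()
# tool.sendDISCOVER('12345\x01', hostname='probe', ip='192.168.1.10')
# tool.processPacketsForOneSecond()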
| CiuffysHub/MITMf | mitmflib-0.18.4/mitmflib/impacket/dhcp.py | Python | gpl-3.0 | 8,466 |
from parso.python import tree
from parso.python.token import PythonTokenTypes
from parso.parser import BaseParser
NAME = PythonTokenTypes.NAME
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
class Parser(BaseParser):
"""
This class is used to parse a Python file; it then divides it into a
class structure of different scopes.
:param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
"""
node_map = {
'expr_stmt': tree.ExprStmt,
'classdef': tree.Class,
'funcdef': tree.Function,
'file_input': tree.Module,
'import_name': tree.ImportName,
'import_from': tree.ImportFrom,
'break_stmt': tree.KeywordStatement,
'continue_stmt': tree.KeywordStatement,
'return_stmt': tree.ReturnStmt,
'raise_stmt': tree.KeywordStatement,
'yield_expr': tree.YieldExpr,
'del_stmt': tree.KeywordStatement,
'pass_stmt': tree.KeywordStatement,
'global_stmt': tree.GlobalStmt,
'nonlocal_stmt': tree.KeywordStatement,
'print_stmt': tree.KeywordStatement,
'assert_stmt': tree.AssertStmt,
'if_stmt': tree.IfStmt,
'with_stmt': tree.WithStmt,
'for_stmt': tree.ForStmt,
'while_stmt': tree.WhileStmt,
'try_stmt': tree.TryStmt,
'sync_comp_for': tree.SyncCompFor,
# Not sure if this is the best idea, but IMO it's the easiest way to
# avoid extreme amounts of work around the subtle difference of 2/3
# grammar in list comprehensions.
'decorator': tree.Decorator,
'lambdef': tree.Lambda,
'lambdef_nocond': tree.Lambda,
'namedexpr_test': tree.NamedExpr,
}
default_node = tree.PythonNode
# Names/Keywords are handled separately
_leaf_map = {
PythonTokenTypes.STRING: tree.String,
PythonTokenTypes.NUMBER: tree.Number,
PythonTokenTypes.NEWLINE: tree.Newline,
PythonTokenTypes.ENDMARKER: tree.EndMarker,
PythonTokenTypes.FSTRING_STRING: tree.FStringString,
PythonTokenTypes.FSTRING_START: tree.FStringStart,
PythonTokenTypes.FSTRING_END: tree.FStringEnd,
}
def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
super().__init__(pgen_grammar, start_nonterminal,
error_recovery=error_recovery)
self.syntax_errors = []
self._omit_dedent_list = []
self._indent_counter = 0
def parse(self, tokens):
if self._error_recovery:
if self._start_nonterminal != 'file_input':
raise NotImplementedError
tokens = self._recovery_tokenize(tokens)
return super().parse(tokens)
def convert_node(self, nonterminal, children):
"""
Convert raw node information to a PythonBaseNode instance.
This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
try:
node = self.node_map[nonterminal](children)
except KeyError:
if nonterminal == 'suite':
# We don't want the INDENT/DEDENT in our parser tree. Those
# leaves are just cancer. They are virtual leaves and not real
# ones and therefore have pseudo start/end positions and no
# prefixes. Just ignore them.
children = [children[0]] + children[2:-1]
node = self.default_node(nonterminal, children)
for c in children:
c.parent = node
return node
def convert_leaf(self, type, value, prefix, start_pos):
# print('leaf', repr(value), token.tok_name[type])
if type == NAME:
if value in self._pgen_grammar.reserved_syntax_strings:
return tree.Keyword(value, start_pos, prefix)
else:
return tree.Name(value, start_pos, prefix)
return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)
def error_recovery(self, token):
tos_nodes = self.stack[-1].nodes
if tos_nodes:
last_leaf = tos_nodes[-1].get_last_leaf()
else:
last_leaf = None
if self._start_nonterminal == 'file_input' and \
(token.type == PythonTokenTypes.ENDMARKER
or token.type == DEDENT and not last_leaf.value.endswith('\n')
and not last_leaf.value.endswith('\r')):
# In Python statements need to end with a newline. But since it's
# possible (and valid in Python) that there's no newline at the
# end of a file, we have to recover even if the user doesn't want
# error recovery.
if self.stack[-1].dfa.from_rule == 'simple_stmt':
try:
plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
except KeyError:
pass
else:
if plan.next_dfa.is_final and not plan.dfa_pushes:
# We are ignoring here that the newline would be
# required for a simple_stmt.
self.stack[-1].dfa = plan.next_dfa
self._add_token(token)
return
if not self._error_recovery:
return super().error_recovery(token)
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for until_index, stack_node in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
if stack_node.nonterminal == 'file_input':
break
elif stack_node.nonterminal == 'suite':
# In the case where we just have a newline we don't want to
# do error recovery here. In all other cases, we want to do
# error recovery.
if len(stack_node.nodes) != 1:
break
return until_index
until_index = current_suite(self.stack)
if self._stack_removal(until_index + 1):
self._add_token(token)
else:
typ, value, start_pos, prefix = token
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
self.stack[-1].nodes.append(error_leaf)
tos = self.stack[-1]
if tos.nonterminal == 'suite':
# Need at least one statement in the suite. This happened with the
# error recovery above.
try:
tos.dfa = tos.dfa.arcs['stmt']
except KeyError:
# We're already in a final state.
pass
def _stack_removal(self, start_index):
all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]
if all_nodes:
node = tree.PythonErrorNode(all_nodes)
for n in all_nodes:
n.parent = node
self.stack[start_index - 1].nodes.append(node)
self.stack[start_index:] = []
return bool(all_nodes)
def _recovery_tokenize(self, tokens):
for token in tokens:
typ = token[0]
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
self._indent_counter -= 1
continue
self._indent_counter -= 1
elif typ == INDENT:
self._indent_counter += 1
yield token
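# --- Illustrative usage sketch (not part of parso) ---
# The Parser above is normally driven through parso's public API rather
# than instantiated directly:
#
# import parso
# module = parso.parse("def f(x):\n    return x + 1\n")
# func = module.children[0]            # a tree.Function node ('funcdef')
# print(func.name.value)               # -> f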
| snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/parso/parso/python/parser.py | Python | gpl-3.0 | 8,227 |
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
from utils import get_class
class AbstractRankingFunction:
"""Abstract base class for ranking functions."""
def __init__(self,
ranker_arg_str,
ties,
feature_count,
init=None,
sample=None):
self.feature_count = feature_count
ranking_model_str = "ranker.model.Linear"
for arg in ranker_arg_str:
if arg.startswith("ranker.model"):
ranking_model_str = arg
else:
self.ranker_type = float(arg)
self.ranking_model = get_class(ranking_model_str)(feature_count)
self.sample = getattr(__import__("utils"), sample)
self.ties = ties
self.w = self.ranking_model.initialize_weights(init)
def score(self, features):
return self.ranking_model.score(features, self.w.transpose())
def get_candidate_weight(self, delta):
u = self.sample(self.ranking_model.get_feature_count())
return self.w + delta * u, u
def init_ranking(self, query):
raise NotImplementedError("Derived class needs to implement "
"init_ranking.")
def next(self):
raise NotImplementedError("Derived class needs to implement "
"next.")
def next_det(self):
raise NotImplementedError("Derived class needs to implement "
"next_det.")
def next_random(self):
raise NotImplementedError("Derived class needs to implement "
"next_random.")
def get_document_probability(self, docid):
raise NotImplementedError("Derived class needs to implement "
"get_document_probability.")
def getDocs(self, numdocs=None):
if numdocs != None:
return self.docids[:numdocs]
return self.docids
def rm_document(self, docid):
raise NotImplementedError("Derived class needs to implement "
"rm_document.")
def document_count(self):
raise NotImplementedError("Derived class needs to implement "
"document_count.")
def update_weights(self, w, alpha=None):
"""update weight vector"""
if alpha == None:
self.w = w
else:
self.w = self.w + alpha * w
| hubert667/AIR | src/python/ranker/AbstractRankingFunction.py | Python | gpl-3.0 | 2,935 |
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
import json
from six import string_types
from custodia.message.common import InvalidMessage
from custodia.message.common import MessageHandler
class SimpleKey(MessageHandler):
"""Handles 'simple' messages"""
def parse(self, msg, name):
"""Parses a simple message
:param msg: the json-decoded value
:param name: the name of the entry being requested
:raises UnknownMessageType: if the type is not 'simple'
:raises InvalidMessage: if the message cannot be parsed or validated
"""
# On requests we imply 'simple' if there is no input message
if msg is None:
return
if not isinstance(msg, string_types):
raise InvalidMessage("The 'value' attribute is not a string")
self.name = name
self.payload = msg
def reply(self, output):
if self.name.endswith('/'):
# directory listings are pass-through with simple messages
return output
return json.dumps({'type': 'simple', 'value': output},
separators=(',', ':'))
| simo5/custodia | custodia/message/simple.py | Python | gpl-3.0 | 1,145 |
import math as m
def near_to_target(lbot, targetX, targetY, nearDist = 50):
x, y, alpha = lbot.getPose()
distToTarget = m.sqrt(m.pow(x-targetX, 2) + m.pow(y-targetY, 2))
if distToTarget <= nearDist:
return True
return False
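# --- Illustrative sketch (not part of learnbot) ---
# Any object exposing getPose() works, so a small stub is enough to see
# the distance test in action:
#
# class FakeBot:
#     def getPose(self):
#         return 100.0, 100.0, 0.0            # x, y, alpha
#
# near_to_target(FakeBot(), 130, 140)         # distance = 50  -> True
# near_to_target(FakeBot(), 200, 200, 80)     # distance ~ 141 -> False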
| robocomp/learnbot | learnbot_dsl/functions/proprioceptive/base/near_to_target.py | Python | gpl-3.0 | 232 |
import sys,os
import numpy as np
#os.environ["EPICS_CA_AUTO_ADDR_LIST"] = "NO"
#os.environ["EPICS_CA_ADDR_LIST"] = "192.168.82.10"
#os.environ["EPICS_CA_MAX_ARRAY_BYTES"] = "10000000000"
import velaINJMagnetControl as VIMC
a = VIMC.velaINJMagnetController(True,False)
print( np.array(a.getQuadNames()))
| mothyjohn/VELA-CLARA-Controllers | General/enums/bin/Release/test.py | Python | gpl-3.0 | 307 |
'''
This file must hold keys for translatable messages
that are used as variables.
It is important that a dummy _() function is used here;
this way the message key will be pulled into django.po
and can still be used as a variable in python files.
'''
_ = lambda v:v
#NOTE: all strings must be explicitly put into this dictionary,
#because you don't want to import _ from here with import *
__all__ = []
#messages loaded in the templates via direct _ calls
_('most relevant questions')
_('click to see most relevant questions')
_('by relevance')
_('click to see the oldest questions')
_('by date')
_('click to see the newest questions')
_('click to see the least recently updated questions')
_('by activity')
_('click to see the most recently updated questions')
_('click to see the least answered questions')
_('by answers')
_('click to see the most answered questions')
_('click to see least voted questions')
_('by votes')
_('click to see most voted questions')
_('interesting')
_('ignored')
_('subscribed')
TAGS_ARE_REQUIRED_MESSAGE = _('tags are required')
TAG_WRONG_CHARS_MESSAGE = _(
'please use letters, numbers and characters "-+.#"'
)
TAG_WRONG_FIRST_CHAR_MESSAGE = _(
'# is not a valid character at the beginning of tags, use only letters and numbers'
)
ACCOUNT_CANNOT_PERFORM_ACTION = _(
'Sorry, you cannot %(perform_action)s because %(your_account_is)s'
)
MIN_REP_REQUIRED_TO_PERFORM_ACTION = _('>%(min_rep)s points required to %(perform_action)s')
CANNOT_PERFORM_ACTION_UNTIL = _('Sorry, you will be able to %(perform_action)s after %(until)s')
MODERATORS_OR_AUTHOR_CAN_PEFROM_ACTION = _(
'Sorry, only moderators or the %(post_author)s %(perform_action)s'
)
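# --- Illustrative sketch (not part of askbot) ---
# Because the dummy _() above only marks the strings for extraction into
# django.po, the keys are translated at runtime with the real gettext,
# for example (ugettext is the Django 1.x-era name):
#
# from django.utils.translation import ugettext
# raise ValueError(ugettext(TAGS_ARE_REQUIRED_MESSAGE))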
| PearsonIOKI/compose-forum | askbot/const/message_keys.py | Python | gpl-3.0 | 1,683 |
import json
import sys
import requests
from collections import Counter
from wapy.api import Wapy
from http.server import BaseHTTPRequestHandler, HTTPServer
wapy = Wapy('frt6ajvkqm4aexwjksrukrey')
def removes(yes):
no = ["Walmart.com", ".", ","]
for x in no:
yes = yes.replace(x, '')
return yes
def post_some_dict(dict):
headers = {'Content-type': 'application/json'}
r = requests.post("http://127.0.0.1:5000/search", data=json.dumps(dict), headers=headers)
return r.text
def parse_image(image):
out = json.loads(post_some_dict({"image_url": image}))['titles']
print(out)
#out = [x for x in out if 'walmart' in x]
threshold = len(out)-1
#out = [x[27:-9] for x in out]
#print(out)
large = []
for line in out:
line = line.replace('-', '')
line = removes(line)
line = line.split(' ')
for word in line:
large.append(word)
#print(large)
c = Counter(large).most_common()
keywords = []
for x in c:
if x[1] > threshold:
keywords.append(x[0])
print(keywords)
return ' '.join(keywords)
def parse_wallmart(keywords):
products = wapy.search(' '.join(keywords))
out = {}
out['name'] = products[0].name
out['rating'] = products[0].customer_rating
out['price'] = products[0].sale_price
return json.dumps(out)
class StoreHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(fh.read().encode())
def do_POST(self):
self.send_response(200)
length = self.headers['content-length']
data = self.rfile.read(int(length))
with open('/var/www/html/image.jpg', 'wb') as fh:
fh.write(data)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(parse_wallmart(parse_image('http://45.33.95.66/image.jpg')).encode())
server = HTTPServer(('', 8081), StoreHandler)
server.serve_forever()
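# --- Illustrative sketch (not part of the original server) ---
# The heart of parse_image() is a word-frequency vote over the titles
# returned for an image; the standalone helper below shows just that step
# with made-up titles (the name consensus_keywords is hypothetical).
from collections import Counter

def consensus_keywords(titles, threshold=None):
    # Keep words that occur in more titles than the threshold
    # (len(titles) - 1 by default, i.e. in every title).
    if threshold is None:
        threshold = len(titles) - 1
    words = [w for t in titles for w in t.replace('-', ' ').split()]
    return [w for w, n in Counter(words).most_common() if n > threshold]

# consensus_keywords(["Acme Blue Widget 2-pack",
#                     "Blue Widget by Acme",
#                     "Acme Widget Blue"])      # -> ['Acme', 'Blue', 'Widget']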
| Pennapps-XV/backend | root/parse-server.py | Python | gpl-3.0 | 2,081 |
import sys
import unittest
import cbind
cbind.choose_cindex_impl(cbind.CLANG_CINDEX)
import suite_all
if __name__ == '__main__':
runner = unittest.TextTestRunner()
sys.exit(not runner.run(suite_all.suite_all).wasSuccessful())
| anthrotype/ctypes-binding-generator | test/suite_clang_cindex.py | Python | gpl-3.0 | 238 |
# -*- encoding: utf-8 -*-
from abjad.tools.durationtools import Duration
from abjad.tools.rhythmtreetools import RhythmTreeContainer, RhythmTreeLeaf
def test_rhythmtreetools_RhythmTreeNode_duration_01():
tree = RhythmTreeContainer(preprolated_duration=1, children=[
RhythmTreeLeaf(preprolated_duration=1),
RhythmTreeContainer(preprolated_duration=2, children=[
RhythmTreeLeaf(preprolated_duration=3),
RhythmTreeLeaf(preprolated_duration=2)
]),
RhythmTreeLeaf(preprolated_duration=2)
])
assert tree.duration == Duration(1)
assert tree[0].duration == Duration(1, 5)
assert tree[1].duration == Duration(2, 5)
assert tree[1][0].duration == Duration(6, 25)
assert tree[1][1].duration == Duration(4, 25)
assert tree[2].duration == Duration(2, 5)
tree[1].append(tree.pop())
assert tree.duration == Duration(1)
assert tree[0].duration == Duration(1, 3)
assert tree[1].duration == Duration(2, 3)
assert tree[1][0].duration == Duration(2, 7)
assert tree[1][1].duration == Duration(4, 21)
assert tree[1][2].duration == Duration(4, 21)
tree.preprolated_duration = 19
assert tree.duration == Duration(19)
assert tree[0].duration == Duration(19, 3)
assert tree[1].duration == Duration(38, 3)
assert tree[1][0].duration == Duration(38, 7)
assert tree[1][1].duration == Duration(76, 21)
assert tree[1][2].duration == Duration(76, 21)
| mscuthbert/abjad | abjad/tools/rhythmtreetools/test/test_rhythmtreetools_RhythmTreeNode_duration.py | Python | gpl-3.0 | 1,469 |
import os
import sys
HOSTNAME = os.environ.get('QLF_HOSTNAME', 'localhost')
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
SECRET_KEY = os.environ.get('SECRET_KEY', os.urandom(32))
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', HOSTNAME).split(',')
SITE_PAGES_DIRECTORY = os.path.join(BASE_DIR, 'layouts')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'dashboard',
'channels',
'ui_channel',
'corsheaders',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'corsheaders.middleware.CorsMiddleware',
)
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
INTERNAL_IPS = os.environ.get('INTERNAL_IPS', '127.0.0.1')
ROOT_URLCONF = 'dashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'dashboard/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'qlf.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
DEBUG = os.environ.get('DEBUG', 'True') == 'True'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'ENGINE': 'django_postgrespool',
'NAME': os.environ.get('POSTGRES_DB', 'dbqlf'),
'USER': os.environ.get('POSTGRES_USER', 'userqlf'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'qlfuser'),
'HOST': os.environ.get('DB_NAME', 'db'),
'OPTIONS': {
'options': os.environ.get('POSTGRES_OPTIONS', '')
},
'PORT': os.environ.get('POSTGRES_PORT', ''),
}
}
#
# DATABASE_POOL_ARGS = {
# 'max_overflow': 30,
# 'pool_size': 10
# }
QLF_BASE_URL = os.environ.get('QLF_BASE_URL', 'http://localhost:8000')
if os.environ.get('QLF_REDIS', False):
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [(os.environ.get('REDIS_NAME', 'redis'), 6379)],
},
"ROUTING": "qlf.routing.channel_routing",
},
}
X_FRAME_OPTIONS = 'ALLOWALL'
XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE']
EMAIL_USE_TLS = os.environ.get('EMAIL_USE_TLS', None)
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 25)
if DEBUG:
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
| linea-it/qlf | backend/framework/qlf/qlf/settings.py | Python | gpl-3.0 | 3,872 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The module file for iosxr_facts
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': [u'preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_facts
version_added: 2.2
short_description: Get facts about iosxr devices.
extends_documentation_fragment: iosxr
description:
- Collects facts from network devices running the iosxr operating
system. This module places the facts gathered in the fact tree keyed by the
respective resource name. The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
notes:
- Tested against IOS-XR 6.1.3.
- This module works with connection C(network_cli). See L(the IOS-XR Platform Options,../network/user_guide/platform_iosxr.html).
author:
- Ricardo Carrillo Cruz (@rcarrillocruz)
- Nilashish Chakraborty (@Nilashishc)
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all and the resources like interfaces, lacp etc.
Can specify a list of values to include a larger subset. Values
can also be used with an initial C(M(!)) to specify that a
specific subset should not be collected.
Valid subsets are 'all', 'lacp', 'lacp_interfaces', 'lldp_global',
'lldp_interfaces', 'interfaces', 'l2_interfaces', 'l3_interfaces',
'lag_interfaces', 'acls', 'acl_interfaces', 'static_routes'.
required: false
version_added: "2.9"
"""
EXAMPLES = """
# Gather all facts
- iosxr_facts:
gather_subset: all
gather_network_resources: all
# Collect only the config and default facts
- iosxr_facts:
gather_subset:
- config
# Do not collect hardware facts
- iosxr_facts:
gather_subset:
- "!hardware"
# Collect only the lacp facts
- iosxr_facts:
gather_subset:
- "!all"
- "!min"
gather_network_resources:
- lacp
# Do not collect lacp_interfaces facts
- iosxr_facts:
gather_network_resources:
- "!lacp_interfaces"
# Collect lacp and minimal default facts
- iosxr_facts:
gather_subset: min
gather_network_resources: lacp
# Collect only the interfaces facts
- iosxr_facts:
gather_subset:
- "!all"
- "!min"
gather_network_resources:
- interfaces
- l2_interfaces
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: The image file the device is running
returned: always
type: str
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
# network resources
ansible_net_gather_network_resources:
description: The list of fact resource subsets collected from the device
returned: always
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec
from ansible.module_utils.network.iosxr.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.iosxr.facts.facts import Facts
def main():
"""
Main entry point for module execution
:returns: ansible_facts
"""
argument_spec = FactsArgs.argument_spec
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = []
if module.params["gather_subset"] == "!config":
warnings.append('default value for `gather_subset` will be changed to `min` from `!config` v2.11 onwards')
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| roadmapper/ansible | lib/ansible/modules/network/iosxr/iosxr_facts.py | Python | gpl-3.0 | 6,288 |
# $Id: tfont.py,v 1.2 2003/09/14 04:31:39 riq Exp $
#
# Tenes Empanadas Graciela
# Copyright 2000,2003 Ricardo Quesada (riq@coresecurity.com)
#
import pygame
if not pygame.font.get_init():
pygame.font.init()
TFont = {
'helvetica 8' : pygame.font.SysFont('helvetica',8),
'helvetica 10' : pygame.font.SysFont('helvetica',10),
'helvetica 12' : pygame.font.SysFont('helvetica',12),
'helvetica 16' : pygame.font.SysFont('helvetica',16,0),
'helvetica 16b' : pygame.font.SysFont('helvetica',16,1),
'helvetica 20' : pygame.font.SysFont('helvetica',20,0),
'helvetica 20b' : pygame.font.SysFont('helvetica',20,1)
}
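# --- Illustrative usage sketch (not part of the original module) ---
# Rendering a label with one of the preloaded fonts; Font.render(text,
# antialias, color) is the standard pygame API, and 'screen' is assumed
# to be an existing pygame Surface.
#
# label = TFont['helvetica 16b'].render('TEG', True, (255, 255, 255))
# screen.blit(label, (10, 10))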
| JeroenDeDauw/teg | python/client/gui/tfont.py | Python | gpl-3.0 | 655 |
#!/usr/bin/env python
"""
@file generateITetrisIntersectionMetrics.py
@author Daniel Krajzewicz
@author Lena Kalleske
@author Michael Behrisch
@date 2007-10-25
@version $Id: generateITetrisIntersectionMetrics.py 14425 2013-08-16 20:11:47Z behrisch $
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from optparse import OptionParser
import os, sys
from numpy import mean
from math import log  # used by HarmonoiseReader.sumIntervalNoise below
from xml.sax import parse, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
def getBasicStats(net, lanesInfo, T):
tlsInfo = {}
for tl in net._tlss:
tlID = tl._id
mQueueLen = []
# mWaitTime = []
nbStops = []
tWaitTime = []
seenLanes = set()
for conn in tl._connections:
lane = conn[0]
if lane in seenLanes:
continue
seenLanes.add(lane)
mQueueLenInfo = sum(lanesInfo[lane.getID()]['mQueueLen'])
mQueueLen.append(mQueueLenInfo)
# mWaitTimeInfo = mean(lanesInfo[lane.getID()]['mWaitTime'])
# mWaitTime.append(mWaitTimeInfo)
nbStopsInfo = sum(lanesInfo[lane.getID()]['nbStops'])
nbStops.append(nbStopsInfo)
tWaitTimeInfo = sum(lanesInfo[lane.getID()]['tWaitTime'])
tWaitTime.append(tWaitTimeInfo)
tlsInfo[tlID] = {}
tlsInfo[tlID]['mQueueLen'] = mean(mQueueLen) / T
tlsInfo[tlID]['mWaitTime'] = mean(tWaitTime) / T
tlsInfo[tlID]['nbStops'] = sum(nbStops)
tlsInfo[tlID]['tWaitTime'] = sum(tWaitTime)
return tlsInfo
def mergeInfos(tlsInfoAll, tlsInfoOne, metric):
for tl in tlsInfoOne.keys():
tlsInfoAll[tl][metric] = tlsInfoOne[tl]
def getStatisticsOutput(tlsInfo, outputfile):
opfile = file(outputfile, 'w')
for tl in tlsInfo.keys():
opfile.write('Traffic Light %s\n' % tl)
opfile.write('=================\n')
opfile.write('mean queue length in front of the junction: %s\n' % tlsInfo[tl]['mQueueLen'])
opfile.write('mean waiting time in front of the junction: %s\n' % tlsInfo[tl]['mWaitTime'])
if 'noise' in tlsInfo[tl]:
opfile.write('mean noise emission: %s\n' % tlsInfo[tl]['noise'])
if 'CO' in tlsInfo[tl]:
opfile.write('mean CO emission: %s\n' % tlsInfo[tl]['CO'])
opfile.write('mean CO2 emission: %s\n' % tlsInfo[tl]['CO2'])
opfile.write('mean HC emission: %s\n' % tlsInfo[tl]['HC'])
opfile.write('mean PMx emission: %s\n' % tlsInfo[tl]['PMx'])
opfile.write('mean NOx emission: %s\n' % tlsInfo[tl]['NOx'])
opfile.write('mean fuel consumption: %s\n' % tlsInfo[tl]['fuel'])
opfile.write('number of stops: %s\n' % tlsInfo[tl]['nbStops'])
opfile.write('total waiting time at junction: %s\n\n' % tlsInfo[tl]['tWaitTime'])
def tlsIDToNodeID(net):
tlsID2NodeID = {}
for tls in net._tlss:
tlsID = tls._id
tlsID2NodeID[tlsID] = []
seenNodes = set()
for conn in tls._connections:
lane = conn[0]
edge = lane._edge
node = edge._to
nodeID = node._id
if nodeID not in seenNodes:
tlsID2NodeID[tlsID].append(nodeID)
seenNodes.add(nodeID)
return tlsID2NodeID
class E2OutputReader(handler.ContentHandler):
def __init__(self):
self._lanes = {}
self._maxT = 0
def startElement(self, name, attrs):
if name == 'interval':
detID = attrs['id']
laneID = detID[6:len(detID)]
if not self._lanes.has_key(laneID):
self._lanes[laneID] = {}
self._lanes[laneID]['mQueueLen'] = []
# self._lanes[laneID]['mWaitTime'] = []
self._lanes[laneID]['nbStops'] = []
self._lanes[laneID]['tWaitTime'] = []
if float(attrs['end']) < 100000000:
self._lanes[laneID]['mQueueLen'].append(float(attrs['jamLengthInMetersSum']))
# self._lanes[laneID]['mWaitTime'].append(float(attrs['meanHaltingDuration']))
self._lanes[laneID]['nbStops'].append(float(attrs['startedHalts']))
self._lanes[laneID]['tWaitTime'].append(float(attrs['haltingDurationSum']))
self._maxT = max(float(attrs['end']), self._maxT)
class HarmonoiseReader(handler.ContentHandler):
def __init__(self, net, tlsID2NodeID):
self._nodeIntervalNoise = {}
self._maxT = 0
self._net = net
self._tlsNoise = {}
self._tlsID2NodeID = tlsID2NodeID
def startElement(self, name, attrs):
if name == 'interval':
self._maxT = max(float(attrs['end']), self._maxT)
if name == 'edge':
edgeID = attrs['id']
noiseStr = attrs['noise']
if len(noiseStr) < 10:
noise = float(noiseStr)
else:
noise = 0
if edgeID[0]==':':
nodeID = edgeID[1:edgeID.find('_')]
if nodeID not in self._nodeIntervalNoise:
self._nodeIntervalNoise[nodeID] = []
self._nodeIntervalNoise[nodeID].append(noise)
else:
fromNodeID = net.getEdge(edgeID)._from._id
if fromNodeID not in self._nodeIntervalNoise:
self._nodeIntervalNoise[fromNodeID] = []
self._nodeIntervalNoise[fromNodeID].append(noise)
toNodeID = net.getEdge(edgeID)._to._id
if toNodeID not in self._nodeIntervalNoise:
self._nodeIntervalNoise[toNodeID] = []
self._nodeIntervalNoise[toNodeID].append(noise)
def endElement(self, name):
if name == 'interval':
self.sumIntervalNoise()
if name == 'netstats':
self.sumNoise()
def sumIntervalNoise(self):
for tls in net._tlss:
sum = 0
tlsID = tls._id
if tlsID not in self._tlsNoise:
self._tlsNoise[tlsID] = []
for nodeID in self._tlsID2NodeID[tlsID]:
for noise in self._nodeIntervalNoise[nodeID]:
sum = sum + pow(10, noise/10)
self._tlsNoise[tlsID].append(10 * log(sum)/log(10))
def sumNoise(self):
for tls in net._tlss:
tlsID = tls._id
self._tlsNoise[tlsID] = sum(self._tlsNoise[tlsID]) / self._maxT
class HBEFAReader(handler.ContentHandler):
def __init__(self, net, tlsID2NodeID):
self._maxT = 0
self._net = net
self._nodeIntervalCO = {}
self._nodeIntervalCO2 = {}
self._nodeIntervalHC = {}
self._nodeIntervalPMx = {}
self._nodeIntervalNOx = {}
self._nodeIntervalfuel = {}
self._tlsCO = {}
self._tlsCO2 = {}
self._tlsHC = {}
self._tlsPMx = {}
self._tlsNOx = {}
self._tlsfuel = {}
self._tlsID2NodeID = tlsID2NodeID
def startElement(self, name, attrs):
if name == 'interval':
self._maxT = max(float(attrs['end']), self._maxT)
if name == 'edge':
edgeID = attrs['id']
CO = float(attrs['CO_perVeh'])
CO2 = float(attrs['CO2_perVeh'])
HC = float(attrs['HC_perVeh'])
PMx = float(attrs['PMx_perVeh'])
NOx = float(attrs['NOx_perVeh'])
fuel = float(attrs['fuel_perVeh'])
        if edgeID[0] == ':':
            # internal edge: it belongs to exactly one junction node
            nodeIDs = [edgeID[1:edgeID.find('_')]]
else:
fromNodeID = net.getEdge(edgeID)._from._id
toNodeID = net.getEdge(edgeID)._to._id
nodeIDs = [fromNodeID, toNodeID]
for nodeID in nodeIDs:
if nodeID not in self._nodeIntervalCO:
self._nodeIntervalCO[nodeID] = []
self._nodeIntervalCO2[nodeID] = []
self._nodeIntervalHC[nodeID] = []
self._nodeIntervalPMx[nodeID] = []
self._nodeIntervalNOx[nodeID] = []
self._nodeIntervalfuel[nodeID] = []
self._nodeIntervalCO[nodeID].append(CO)
self._nodeIntervalCO2[nodeID].append(CO2)
self._nodeIntervalHC[nodeID].append(HC)
self._nodeIntervalPMx[nodeID].append(PMx)
self._nodeIntervalNOx[nodeID].append(NOx)
self._nodeIntervalfuel[nodeID].append(fuel)
def endElement(self, name):
if name == 'interval':
self.sumInterval()
if name == 'netstats':
self.sum()
def sumInterval(self):
for tls in net._tlss:
tlsID = tls._id
if tlsID not in self._tlsCO:
self._tlsCO[tlsID] = []
self._tlsCO2[tlsID] = []
self._tlsHC[tlsID] = []
self._tlsPMx[tlsID] = []
self._tlsNOx[tlsID] = []
self._tlsfuel[tlsID] = []
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalCO[nodeID]:
sum = sum + v
self._tlsCO[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalCO2[nodeID]:
sum = sum + v
self._tlsCO2[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalHC[nodeID]:
sum = sum + v
self._tlsHC[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalPMx[nodeID]:
sum = sum + v
self._tlsPMx[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalNOx[nodeID]:
sum = sum + v
self._tlsNOx[tlsID].append(sum)
sum = 0
for nodeID in self._tlsID2NodeID[tlsID]:
for v in self._nodeIntervalfuel[nodeID]:
sum = sum + v
self._tlsfuel[tlsID].append(sum)
def sum(self):
for tls in net._tlss:
tlsID = tls._id
self._tlsCO[tlsID] = sum(self._tlsCO[tlsID]) / self._maxT
self._tlsCO2[tlsID] = sum(self._tlsCO2[tlsID]) / self._maxT
self._tlsHC[tlsID] = sum(self._tlsHC[tlsID]) / self._maxT
self._tlsPMx[tlsID] = sum(self._tlsPMx[tlsID]) / self._maxT
self._tlsNOx[tlsID] = sum(self._tlsNOx[tlsID]) / self._maxT
self._tlsfuel[tlsID] = sum(self._tlsfuel[tlsID]) / self._maxT
# initialise
optParser = OptionParser()
optParser.add_option("-n", "--netfile", dest="netfile",
help="name of the netfile (f.e. 'inputs\\pasubio\\a_costa.net.xml')", metavar="<FILE>", type="string")
optParser.add_option("-p", "--path", dest="path",
help="name of folder to work with (f.e. 'inputs\\a_costa\\')", metavar="<FOLDER>", type="string", default="./")
optParser.add_option("-o", "--harmonoiseFile", dest="harmonoiseFile",
help="name of the harmonoise file", metavar="<FOLDER>", type="string")
optParser.add_option("-e", "--HBEFAFile", dest="hbefaFile",
help="name of the HBEFA file", metavar="<FOLDER>", type="string")
optParser.set_usage('\n-n inputs\\pasubio\\pasubio.net.xml -p inputs\\pasubio\\')
# parse options
(options, args) = optParser.parse_args()
if not options.netfile:
print "Missing arguments"
optParser.print_help()
exit()
netfile = options.netfile
e2OutputFile = os.path.join(options.path, 'e2_output.xml')
net = sumolib.net.readNet(netfile)
e2Output = E2OutputReader()
parse(e2OutputFile, e2Output)
tlsID2NodeID = tlsIDToNodeID(net)
tlsInfo = getBasicStats(net, e2Output._lanes, e2Output._maxT)
if options.harmonoiseFile:
harmonoiseOutput = HarmonoiseReader(net, tlsID2NodeID)
parse(options.harmonoiseFile, harmonoiseOutput)
mergeInfos(tlsInfo, harmonoiseOutput._tlsNoise, 'noise')
if options.hbefaFile:
hbefaOutput = HBEFAReader(net, tlsID2NodeID)
    parse(options.hbefaFile, hbefaOutput)
mergeInfos(tlsInfo, hbefaOutput._tlsCO, 'CO')
mergeInfos(tlsInfo, hbefaOutput._tlsCO2, 'CO2')
mergeInfos(tlsInfo, hbefaOutput._tlsHC, 'HC')
mergeInfos(tlsInfo, hbefaOutput._tlsPMx, 'PMx')
mergeInfos(tlsInfo, hbefaOutput._tlsNOx, 'NOx')
mergeInfos(tlsInfo, hbefaOutput._tlsfuel, 'fuel')
getStatisticsOutput(tlsInfo, os.path.join(options.path, "intersection_metrics_summary.txt"))
print 'The calculation is done!'
|
cathyyul/sumo-0.18
|
tools/output/generateITetrisIntersectionMetrics.py
|
Python
|
gpl-3.0
| 12,974
|
from .polarpoint import PolarPoint
__all__ = [
'PolarPoint',
]
|
dfroger/geomalgo
|
geomalgo/polar/__init__.py
|
Python
|
gpl-3.0
| 68
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from django.template import Context, Template
from pootle.core.delegate import scores
def _render_str(string, context=None):
context = context or {}
context = Context(context)
return Template(string).render(context)
def test_templatetag_progress_bar():
rendered = _render_str("{% load common_tags %}{% progress_bar 0 0 0 %}")
assert "<span class=\'value translated\'>0%</span>" in rendered
assert '<span class=\'value fuzzy\'>0%</span>' in rendered
assert '<span class=\'value untranslated\'>0%</span>' in rendered
rendered = _render_str(
"{% load common_tags %}{% progress_bar 123 23 73 %}")
assert "<span class=\'value translated\'>59.3%</span>" in rendered
assert "<span class=\'value fuzzy\'>18.7%</span>" in rendered
assert "<span class=\'value untranslated\'>22.0%</span>" in rendered
assert '<td class="translated" style="width: 59.3%">' in rendered
assert '<td class="fuzzy" style="width: 18.7%">' in rendered
assert '<td class="untranslated" style="width: 22.0%">' in rendered
@pytest.mark.django_db
def test_inclusion_tag_top_scorers(project_set, member):
score_data = scores.get(project_set.__class__)(project_set)
rendered = _render_str(
"{% load common_tags %}{% top_scorers user score_data %}",
context=dict(
user=member,
score_data=score_data.display()))
top_scorer = list(score_data.display())[0]
assert top_scorer["public_total_score"] in rendered
assert top_scorer["user"].email_hash in rendered
|
claudep/pootle
|
tests/pootle_misc/templatetags.py
|
Python
|
gpl-3.0
| 1,836
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((4135.63, -1070.6, 11660.5), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((4398.66, -1885.45, 11556), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4346, -175.265, 10569.5), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((4223.5, 1861.87, 9387.15), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((4224.1, 2470.58, 8995.04), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2596.13, 1379.53, 8172.18), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3316.53, 2240.06, 6607.33), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2583.82, 1751.55, 5136.33), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((3387, 1724.66, 3770.93), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((4317.71, 1934.83, 2179.92), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((5963.21, 1383.66, 2065.49), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((6342.36, -373.552, 922.922), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((6711.54, -2098.47, -184.939), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((6382.88, -2315.65, 1390.69), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((6780.75, -1179.03, 265.654), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((6674.81, 354.788, 232.452), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((6063.62, 1175.64, 1159.87), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5400.78, 2218.88, 2007.65), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((3951.99, 1285, 2398.29), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3014.12, 2460.53, 2339.28), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2097.93, 3863.9, 1690.04), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((882.166, 4466.2, 643.004), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((1847.75, 4807.14, 1639.62), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((3845.84, 5275.07, 2103.55), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5900.88, 5746.76, 1743.52), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6891.95, 5990.09, 1469.8), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7156.82, 7390.62, 3791.02), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((7863.39, 8865, 4672.27), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((6883.27, 9508.46, 5162.61), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((5306.04, 11132.3, 5692.61), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5604.24, 10591.3, 5499.9), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((6435.75, 11209.8, 6255.67), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((6546.22, 12724.7, 7940.81), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((6903.16, 12091.3, 9204.94), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((6500.01, 11137.7, 10309.1), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((5691.31, 11026.7, 11880), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4142.55, 10951.5, 12569.6), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((3567.94, 11207.3, 11077.2), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((4422.45, 12412.7, 10636.6), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((5975.48, 11714.3, 9585.63), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((6803.89, 12013.8, 10353.8), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((6510.18, 11249.9, 9360.04), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((6262.14, 10457.4, 10043.2), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((6527.49, 11190.3, 9628), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((6882.44, 11606.9, 7879.59), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((8233.03, 9644.28, 6270.22), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((9834.19, 8665.84, 6262.73), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((10542.8, 7972.79, 6841.18), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((11998.3, 8255.99, 8163.14), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((14091.2, 9372.32, 9368.35), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((13051.9, 10596.5, 10037), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((11660.7, 11231.3, 7572.41), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((11147.2, 10890.1, 8058.48), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((10302, 11401.5, 9681.69), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((9427.83, 11028.2, 10913.8), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((8414.68, 9534.64, 10649.3), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
batxes/4Cin
|
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models8148.py
|
Python
|
gpl-3.0
| 13,920
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import errno, socket, select, os
from Cookie import SimpleCookie
from contextlib import closing
from urlparse import parse_qs
import repr as reprlib
from email.utils import formatdate
from operator import itemgetter
from future_builtins import map
from urllib import quote as urlquote
from binascii import hexlify, unhexlify
from calibre import prints
from calibre.constants import iswindows
from calibre.utils.config_base import tweaks
from calibre.utils.localization import get_translator
from calibre.utils.socket_inheritance import set_socket_inherit
from calibre.utils.logging import ThreadSafeLog
from calibre.utils.shared_file import share_open, raise_winerror
HTTP1 = 'HTTP/1.0'
HTTP11 = 'HTTP/1.1'
DESIRED_SEND_BUFFER_SIZE = 16 * 1024 # windows 7 uses an 8KB sndbuf
def http_date(timeval=None):
return type('')(formatdate(timeval=timeval, usegmt=True))
class MultiDict(dict): # {{{
def __setitem__(self, key, val):
vals = dict.get(self, key, [])
vals.append(val)
dict.__setitem__(self, key, vals)
def __getitem__(self, key):
return dict.__getitem__(self, key)[-1]
@staticmethod
def create_from_query_string(qs):
ans = MultiDict()
for k, v in parse_qs(qs, keep_blank_values=True).iteritems():
dict.__setitem__(ans, k.decode('utf-8'), [x.decode('utf-8') for x in v])
return ans
def update_from_listdict(self, ld):
for key, values in ld.iteritems():
for val in values:
self[key] = val
def items(self, duplicates=True):
for k, v in dict.iteritems(self):
if duplicates:
for x in v:
yield k, x
else:
yield k, v[-1]
iteritems = items
def values(self, duplicates=True):
for v in dict.itervalues(self):
if duplicates:
for x in v:
yield x
else:
yield v[-1]
itervalues = values
def set(self, key, val, replace_all=False):
if replace_all:
dict.__setitem__(self, key, [val])
else:
self[key] = val
def get(self, key, default=None, all=False):
if all:
try:
return dict.__getitem__(self, key)
except KeyError:
return []
try:
return self.__getitem__(key)
except KeyError:
return default
def pop(self, key, default=None, all=False):
ans = dict.pop(self, key, default)
if ans is default:
return [] if all else default
return ans if all else ans[-1]
def __repr__(self):
return '{' + ', '.join('%s: %s' % (reprlib.repr(k), reprlib.repr(v)) for k, v in self.iteritems()) + '}'
__str__ = __unicode__ = __repr__
def pretty(self, leading_whitespace=''):
return leading_whitespace + ('\n' + leading_whitespace).join(
'%s: %s' % (k, (repr(v) if isinstance(v, bytes) else v)) for k, v in sorted(self.items(), key=itemgetter(0)))
# }}}
def error_codes(*errnames):
''' Return error numbers for error names, ignoring non-existent names '''
ans = {getattr(errno, x, None) for x in errnames}
ans.discard(None)
return ans
socket_errors_eintr = error_codes("EINTR", "WSAEINTR")
socket_errors_socket_closed = error_codes( # errors indicating a disconnected connection
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ENOTCONN", "WSAENOTCONN",
"ESHUTDOWN", "WSAESHUTDOWN",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_nonblocking = error_codes(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
def start_cork(sock):
if hasattr(socket, 'TCP_CORK'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
def stop_cork(sock):
if hasattr(socket, 'TCP_CORK'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
def create_sock_pair(port=0):
'''Create socket pair. Works also on windows by using an ephemeral TCP port.'''
if hasattr(socket, 'socketpair'):
client_sock, srv_sock = socket.socketpair()
set_socket_inherit(client_sock, False), set_socket_inherit(srv_sock, False)
return client_sock, srv_sock
# Create a non-blocking temporary server socket
temp_srv_sock = socket.socket()
set_socket_inherit(temp_srv_sock, False)
temp_srv_sock.setblocking(False)
temp_srv_sock.bind(('127.0.0.1', port))
port = temp_srv_sock.getsockname()[1]
temp_srv_sock.listen(1)
with closing(temp_srv_sock):
# Create non-blocking client socket
client_sock = socket.socket()
client_sock.setblocking(False)
set_socket_inherit(client_sock, False)
try:
client_sock.connect(('127.0.0.1', port))
except socket.error as err:
# EWOULDBLOCK is not an error, as the socket is non-blocking
if err.errno not in socket_errors_nonblocking:
raise
# Use select to wait for connect() to succeed.
timeout = 1
readable = select.select([temp_srv_sock], [], [], timeout)[0]
if temp_srv_sock not in readable:
raise Exception('Client socket not connected in {} second(s)'.format(timeout))
srv_sock = temp_srv_sock.accept()[0]
set_socket_inherit(srv_sock, False)
client_sock.setblocking(True)
return client_sock, srv_sock
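# A small usage sketch: the pair behaves like two connected local sockets, which
# is handy for waking up a select() loop from another thread, e.g.
#   client, srv = create_sock_pair()
#   client.sendall(b'x')                            # makes srv readable
#   readable = select.select([srv], [], [], 1)[0]   # -> [srv]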
def parse_http_list(header_val):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Neither commas nor quotes count if they are escaped.
Only double-quotes count, not single-quotes.
"""
if isinstance(header_val, bytes):
slash, dquote, comma = b'\\",'
empty = b''
else:
slash, dquote, comma = '\\",'
empty = ''
part = empty
escape = quote = False
for cur in header_val:
if escape:
part += cur
escape = False
continue
if quote:
if cur == slash:
escape = True
continue
elif cur == dquote:
quote = False
part += cur
continue
if cur == comma:
yield part.strip()
part = empty
continue
if cur == dquote:
quote = True
part += cur
if part:
yield part.strip()
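# Example: list(parse_http_list('a, "b, c", d')) == ['a', '"b, c"', 'd'];
# commas inside double quotes do not split and the quotes are kept in the
# returned items.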
def parse_http_dict(header_val):
'Parse an HTTP comma separated header with items of the form a=1, b="xxx" into a dictionary'
if not header_val:
return {}
ans = {}
sep, dquote = b'="' if isinstance(header_val, bytes) else '="'
for item in parse_http_list(header_val):
k, v = item.partition(sep)[::2]
if k:
if v.startswith(dquote) and v.endswith(dquote):
v = v[1:-1]
ans[k] = v
return ans
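# Example: parse_http_dict('a=1, b="xxx"') == {'a': '1', 'b': 'xxx'}; items
# without an '=' are mapped to the empty string.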
def sort_q_values(header_val):
'Get sorted items from an HTTP header of type: a;q=0.5, b;q=0.7...'
if not header_val:
return []
def item(x):
e, r = x.partition(';')[::2]
p, v = r.partition('=')[::2]
q = 1.0
if p == 'q' and v:
try:
q = max(0.0, min(1.0, float(v.strip())))
except Exception:
pass
return e.strip(), q
return tuple(map(itemgetter(0), sorted(map(item, parse_http_list(header_val)), key=itemgetter(1), reverse=True)))
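# Example: sort_q_values('text/html;q=0.8, application/json, text/plain;q=0.5')
# returns ('application/json', 'text/html', 'text/plain'); entries without an
# explicit q default to 1.0 and therefore sort first.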
def eintr_retry_call(func, *args, **kwargs):
while True:
try:
return func(*args, **kwargs)
except EnvironmentError as e:
if getattr(e, 'errno', None) in socket_errors_eintr:
continue
raise
def get_translator_for_lang(cache, bcp_47_code):
try:
return cache[bcp_47_code]
except KeyError:
pass
cache[bcp_47_code] = ans = get_translator(bcp_47_code)
return ans
def encode_path(*components):
'Encode the path specified as a list of path components using URL encoding'
return '/' + '/'.join(urlquote(x.encode('utf-8'), '').decode('ascii') for x in components)
def encode_name(name):
'Encode a name (arbitrary string) as URL safe characters. See decode_name() also.'
if isinstance(name, unicode):
name = name.encode('utf-8')
return hexlify(name)
def decode_name(name):
return unhexlify(name).decode('utf-8')
class Cookie(SimpleCookie):
def _BaseCookie__set(self, key, real_value, coded_value):
if not isinstance(key, bytes):
key = key.encode('ascii') # Python 2.x cannot handle unicode keys
return SimpleCookie._BaseCookie__set(self, key, real_value, coded_value)
def custom_fields_to_display(db):
ckeys = set(db.field_metadata.ignorable_field_keys())
yes_fields = set(tweaks['content_server_will_display'])
no_fields = set(tweaks['content_server_wont_display'])
if '*' in yes_fields:
yes_fields = ckeys
if '*' in no_fields:
no_fields = ckeys
return frozenset(ckeys & (yes_fields - no_fields))
# Logging {{{
class ServerLog(ThreadSafeLog):
exception_traceback_level = ThreadSafeLog.WARN
class RotatingStream(object):
def __init__(self, filename, max_size=None, history=5):
self.filename, self.history, self.max_size = filename, history, max_size
if iswindows:
self.filename = '\\\\?\\' + os.path.abspath(self.filename)
self.set_output()
def set_output(self):
self.stream = share_open(self.filename, 'ab', -1 if iswindows else 1) # line buffered
try:
self.current_pos = self.stream.tell()
except EnvironmentError:
# Happens if filename is /dev/stdout for example
self.current_pos = 0
self.max_size = None
def flush(self):
self.stream.flush()
def prints(self, level, *args, **kwargs):
kwargs['safe_encode'] = True
kwargs['file'] = self.stream
self.current_pos += prints(*args, **kwargs)
if iswindows:
# For some reason line buffering does not work on windows
end = kwargs.get('end', b'\n')
if b'\n' in end:
self.flush()
self.rollover()
def rename(self, src, dest):
try:
if iswindows:
import win32file, pywintypes
try:
win32file.MoveFileEx(src, dest, win32file.MOVEFILE_REPLACE_EXISTING|win32file.MOVEFILE_WRITE_THROUGH)
except pywintypes.error as e:
raise_winerror(e)
else:
os.rename(src, dest)
except EnvironmentError as e:
if e.errno != errno.ENOENT: # the source of the rename does not exist
raise
def rollover(self):
if self.max_size is None or self.current_pos <= self.max_size:
return
self.stream.close()
for i in xrange(self.history - 1, 0, -1):
src, dest = '%s.%d' % (self.filename, i), '%s.%d' % (self.filename, i+1)
self.rename(src, dest)
self.rename(self.filename, '%s.%d' % (self.filename, 1))
self.set_output()
class RotatingLog(ServerLog):
def __init__(self, filename, max_size=None, history=5):
ServerLog.__init__(self)
self.outputs = [RotatingStream(filename, max_size, history)]
def flush(self):
for o in self.outputs:
o.flush()
# }}}
class HandleInterrupt(object): # {{{
# On windows socket functions like accept(), recv(), send() are not
# interrupted by a Ctrl-C in the console. So to make Ctrl-C work we have to
# use this special context manager. See the echo server example at the
# bottom of this file for how to use it.
def __init__(self, action):
if not iswindows:
return # Interrupts work fine on POSIX
self.action = action
from ctypes import WINFUNCTYPE, windll
from ctypes.wintypes import BOOL, DWORD
kernel32 = windll.LoadLibrary('kernel32')
# <http://msdn.microsoft.com/en-us/library/ms686016.aspx>
PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD)
self.SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler
self.SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL)
self.SetConsoleCtrlHandler.restype = BOOL
@PHANDLER_ROUTINE
def handle(event):
if event == 0: # CTRL_C_EVENT
if self.action is not None:
self.action()
self.action = None
# Typical C implementations would return 1 to indicate that
# the event was processed and other control handlers in the
# stack should not be executed. However, that would
# prevent the Python interpreter's handler from translating
# CTRL-C to a `KeyboardInterrupt` exception, so we pretend
# that we didn't handle it.
return 0
self.handle = handle
def __enter__(self):
if iswindows:
if self.SetConsoleCtrlHandler(self.handle, 1) == 0:
raise WindowsError()
def __exit__(self, *args):
if iswindows:
if self.SetConsoleCtrlHandler(self.handle, 0) == 0:
raise WindowsError()
# }}}
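# A minimal usage sketch (the echo server example mentioned above is not shown
# here): pass a callback that tells the serving loop to stop, e.g.
#   stop = []
#   with HandleInterrupt(lambda: stop.append(True)):
#       while not stop:
#           pass  # accept()/recv()/send() on sockets would go here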
class Accumulator(object): # {{{
'Optimized replacement for BytesIO when the usage pattern is many writes followed by a single getvalue()'
def __init__(self):
self._buf = []
self.total_length = 0
def append(self, b):
self._buf.append(b)
self.total_length += len(b)
def getvalue(self):
ans = b''.join(self._buf)
self._buf = []
self.total_length = 0
return ans
# }}}
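# Example: acc = Accumulator(); acc.append(b'ab'); acc.append(b'cd') gives
# acc.total_length == 4 and acc.getvalue() == b'abcd'; getvalue() also resets
# the accumulator for reuse.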
class ReadOnlyFileBuffer(object):
''' A zero copy implementation of a file like object. Uses memoryviews for efficiency. '''
def __init__(self, raw):
self.sz, self.mv = len(raw), (raw if isinstance(raw, memoryview) else memoryview(raw))
self.pos = 0
def tell(self):
return self.pos
def read(self, n=None):
if n is None:
ans = self.mv[self.pos:]
self.pos = self.sz
return ans
ans = self.mv[self.pos:self.pos+n]
self.pos = min(self.pos + n, self.sz)
return ans
def seek(self, pos, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.pos = pos
elif whence == os.SEEK_END:
self.pos = self.sz + pos
else:
self.pos += pos
self.pos = max(0, min(self.pos, self.sz))
return self.pos
def getvalue(self):
return self.mv
def close(self):
pass
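# Example: buf = ReadOnlyFileBuffer(b'hello world'); buf.read(5).tobytes() ==
# b'hello'; buf.seek(6); buf.read().tobytes() == b'world'. read() returns
# memoryview slices, so no data is copied.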
|
hazrpg/calibre
|
src/calibre/srv/utils.py
|
Python
|
gpl-3.0
| 15,349
|
../../../../../../../../share/pyshared/papyon/service/AddressBook/scenario/contacts/block_contact.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/papyon/service/AddressBook/scenario/contacts/block_contact.py
|
Python
|
gpl-3.0
| 100
|
# Speak.activity
# A simple front end to the espeak text-to-speech engine on the XO laptop
# http://wiki.laptop.org/go/Speak
#
# Copyright (C) 2008 Joshua Minor
# Copyright (C) 2014 Walter Bender
# This file is part of Speak.activity
#
# Parts of Speak.activity are based on code from Measure.activity
# Copyright (C) 2007 Arjun Sarwal - arjun@laptop.org
#
# Speak.activity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Speak.activity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from eye import Eye
from utils import svg_str_to_pixbuf
class Sleepy(Eye):
def __init__(self, fill_color):
Eye.__init__(self, fill_color)
self._pixbuf = svg_str_to_pixbuf(eye_svg())
def draw(self, widget, cr):
bounds = self.get_allocation()
# background
cr.set_source_rgba(*self.fill_color.get_rgba())
cr.rectangle(0, 0, bounds.width, bounds.height)
cr.fill()
w = h = min(bounds.width, bounds.height)
x = int((bounds.width - w) // 2)
y = int((bounds.height - h) // 2)
pixbuf = self._pixbuf.scale_simple(w, h, GdkPixbuf.InterpType.BILINEAR)
cr.translate(x + w / 2., y + h / 2.)
cr.translate(-x - w / 2., -y - h / 2.)
Gdk.cairo_set_source_pixbuf(cr, pixbuf, x, y)
cr.rectangle(x, y, w, h)
cr.fill()
return True
def eye_svg():
return \
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n' + \
'<svg\n' + \
' xmlns:svg="http://www.w3.org/2000/svg"\n' + \
' xmlns="http://www.w3.org/2000/svg"\n' + \
' version="1.1"\n' + \
' width="300"\n' + \
' height="300">\n' + \
' <path\n' + \
' d="m 260.26893,151.09803 c -6.07398,14.55176 -15.05894,27.89881 -26.27797,39.03563 -11.21904,11.13683 -24.66333,20.05466 -39.32004,26.08168 -14.65671,6.02702 -30.51431,9.15849 -46.37814,9.15849 -15.86384,0 -31.72144,-3.13147 -46.37815,-9.15849 C 87.257925,210.18832 73.813631,201.27049 62.594594,190.13366 51.375557,178.99684 42.3906,165.64979 36.316616,151.09803"\n' + \
' style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:13.18636799;stroke-linecap:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />\n' + \
'</svg>\n'
|
walterbender/speak
|
sleepy.py
|
Python
|
gpl-3.0
| 2,928
|
""" UserProfileDB class is a front-end to the User Profile Database
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import sys
import hashlib
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Base.DB import DB
class UserProfileDB(DB):
""" UserProfileDB class is a front-end to the User Profile Database
"""
tableDict = {'up_Users': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserName': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'U': ['UserName']},
'Engine': 'InnoDB',
},
'up_Groups': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserGroup': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'G': ['UserGroup']},
'Engine': 'InnoDB',
},
'up_VOs': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'VO': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'VO': ['VO']},
'Engine': 'InnoDB',
},
'up_ProfilesData': {'Fields': {'UserId': 'INTEGER',
'GroupId': 'INTEGER',
'VOId': 'INTEGER',
'Profile': 'VARCHAR(255) NOT NULL',
'VarName': 'VARCHAR(255) NOT NULL',
'Data': 'BLOB',
'ReadAccess': 'VARCHAR(10) DEFAULT "USER"',
'PublishAccess': 'VARCHAR(10) DEFAULT "USER"',
},
'PrimaryKey': ['UserId', 'GroupId', 'Profile', 'VarName'],
'Indexes': {'ProfileKey': ['UserId', 'GroupId', 'Profile'],
'UserKey': ['UserId'],
},
'Engine': 'InnoDB',
},
'up_HashTags': {'Fields': {'UserId': 'INTEGER',
'GroupId': 'INTEGER',
'VOId': 'INTEGER',
'HashTag': 'VARCHAR(32) NOT NULL',
'TagName': 'VARCHAR(255) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': ['UserId', 'GroupId', 'TagName'],
'Indexes': {'HashKey': ['UserId', 'HashTag']},
'Engine': 'InnoDB',
},
}
def __init__(self):
""" Constructor
"""
self.__permValues = ['USER', 'GROUP', 'VO', 'ALL']
self.__permAttrs = ['ReadAccess', 'PublishAccess']
DB.__init__(self, 'UserProfileDB', 'Framework/UserProfileDB')
retVal = self.__initializeDB()
if not retVal['OK']:
raise Exception("Can't create tables: %s" % retVal['Message'])
def _checkTable(self):
""" Make sure the tables are created
"""
return self.__initializeDB()
def __initializeDB(self):
"""
Create the tables
"""
retVal = self._query("show tables")
if not retVal['OK']:
return retVal
tablesInDB = [t[0] for t in retVal['Value']]
tablesD = {}
if 'up_Users' not in tablesInDB:
tablesD['up_Users'] = self.tableDict['up_Users']
if 'up_Groups' not in tablesInDB:
tablesD['up_Groups'] = self.tableDict['up_Groups']
if 'up_VOs' not in tablesInDB:
tablesD['up_VOs'] = self.tableDict['up_VOs']
if 'up_ProfilesData' not in tablesInDB:
tablesD['up_ProfilesData'] = self.tableDict['up_ProfilesData']
if 'up_HashTags' not in tablesInDB:
tablesD['up_HashTags'] = self.tableDict['up_HashTags']
return self._createTables(tablesD)
def __getUserId(self, userName, insertIfMissing=True):
return self.__getObjId(userName, 'UserName', 'up_Users', insertIfMissing)
def __getGroupId(self, groupName, insertIfMissing=True):
return self.__getObjId(groupName, 'UserGroup', 'up_Groups', insertIfMissing)
def __getVOId(self, voName, insertIfMissing=True):
return self.__getObjId(voName, 'VO', 'up_VOs', insertIfMissing)
def __getObjId(self, objValue, varName, tableName, insertIfMissing=True):
result = self.getFields(tableName, ['Id'], {varName: objValue})
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
objId = data[0][0]
self.updateFields(tableName, ['LastAccess'], ['UTC_TIMESTAMP()'], {'Id': objId})
return S_OK(objId)
if not insertIfMissing:
return S_ERROR("No entry %s for %s defined in the DB" % (objValue, varName))
result = self.insertFields(tableName, [varName, 'LastAccess'], [objValue, 'UTC_TIMESTAMP()'])
if not result['OK']:
return result
return S_OK(result['lastRowId'])
def getUserGroupIds(self, userName, userGroup, insertIfMissing=True):
result = self.__getUserId(userName, insertIfMissing)
if not result['OK']:
return result
userId = result['Value']
result = self.__getGroupId(userGroup, insertIfMissing)
if not result['OK']:
return result
groupId = result['Value']
userVO = Registry.getVOForGroup(userGroup)
if not userVO:
userVO = "undefined"
result = self.__getVOId(userVO, insertIfMissing)
if not result['OK']:
return result
voId = result['Value']
return S_OK((userId, groupId, voId))
def deleteUserProfile(self, userName, userGroup=False):
"""
Delete the profiles for a user
"""
result = self.__getUserId(userName)
if not result['OK']:
return result
userId = result['Value']
condDict = {'UserId': userId}
if userGroup:
result = self.__getGroupId(userGroup)
if not result['OK']:
return result
groupId = result['Value']
condDict['GroupId'] = groupId
result = self.deleteEntries('up_ProfilesData', condDict)
if not result['OK'] or not userGroup:
return result
return self.deleteEntries('up_Users', {'Id': userId})
def __webProfileUserDataCond(self, userIds, sqlProfileName=False, sqlVarName=False):
condSQL = ['`up_ProfilesData`.UserId=%s' % userIds[0],
'`up_ProfilesData`.GroupId=%s' % userIds[1],
'`up_ProfilesData`.VOId=%s' % userIds[2]]
if sqlProfileName:
condSQL.append('`up_ProfilesData`.Profile=%s' % sqlProfileName)
if sqlVarName:
condSQL.append('`up_ProfilesData`.VarName=%s' % sqlVarName)
return " AND ".join(condSQL)
def __webProfileReadAccessDataCond(self, userIds, ownerIds, sqlProfileName, sqlVarName=False, match=False):
permCondSQL = []
sqlCond = []
if match:
sqlCond.append('`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % (ownerIds[0], ownerIds[1]))
else:
permCondSQL.append(
'`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' %
(ownerIds[0], ownerIds[1]))
permCondSQL.append('`up_ProfilesData`.GroupId=%s AND `up_ProfilesData`.ReadAccess="GROUP"' % userIds[1])
permCondSQL.append('`up_ProfilesData`.VOId=%s AND `up_ProfilesData`.ReadAccess="VO"' % userIds[2])
permCondSQL.append('`up_ProfilesData`.ReadAccess="ALL"')
sqlCond.append('`up_ProfilesData`.Profile = %s' % sqlProfileName)
if sqlVarName:
sqlCond.append("`up_ProfilesData`.VarName = %s" % (sqlVarName))
# Perms
sqlCond.append("( ( %s ) )" % " ) OR ( ".join(permCondSQL))
return " AND ".join(sqlCond)
def __parsePerms(self, perms, addMissing=True):
normPerms = {}
for pName in self.__permAttrs:
if not perms or pName not in perms:
if addMissing:
normPerms[pName] = self.__permValues[0]
continue
else:
permVal = perms[pName].upper()
for nV in self.__permValues:
if nV == permVal:
normPerms[pName] = nV
break
if pName not in normPerms and addMissing:
normPerms[pName] = self.__permValues[0]
return normPerms
def retrieveVarById(self, userIds, ownerIds, profileName, varName):
"""
Get a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlCond = self.__webProfileReadAccessDataCond(userIds, ownerIds, sqlProfileName, sqlVarName, True)
# when we retrieve the user profile we have to take into account the user.
selectSQL = "SELECT data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
return S_OK(data[0][0])
return S_ERROR("No data for userIds %s profileName %s varName %s" % (userIds, profileName, varName))
def retrieveAllUserVarsById(self, userIds, profileName):
"""
Get a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
sqlCond = self.__webProfileUserDataCond(userIds, sqlProfileName)
selectSQL = "SELECT varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
return S_OK(dict(data))
def retrieveUserProfilesById(self, userIds):
"""
Get all profiles and data for a user
"""
sqlCond = self.__webProfileUserDataCond(userIds)
selectSQL = "SELECT Profile, varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
dataDict = {}
for row in data:
if row[0] not in dataDict:
dataDict[row[0]] = {}
dataDict[row[0]][row[1]] = row[2]
return S_OK(dataDict)
def retrieveVarPermsById(self, userIds, ownerIds, profileName, varName):
"""
Get a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlCond = self.__webProfileReadAccessDataCond(userIds, ownerIds, sqlProfileName, sqlVarName)
selectSQL = "SELECT %s FROM `up_ProfilesData` WHERE %s" % (", ".join(self.__permAttrs), sqlCond)
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
permDict = {}
for i in range(len(self.__permAttrs)):
permDict[self.__permAttrs[i]] = data[0][i]
return S_OK(permDict)
return S_ERROR("No data for userIds %s profileName %s varName %s" % (userIds, profileName, varName))
def deleteVarByUserId(self, userIds, profileName, varName):
"""
Remove a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlCond = self.__webProfileUserDataCond(userIds, sqlProfileName, sqlVarName)
selectSQL = "DELETE FROM `up_ProfilesData` WHERE %s" % sqlCond
return self._update(selectSQL)
def storeVarByUserId(self, userIds, profileName, varName, data, perms):
"""
Set a data entry for a profile
"""
sqlInsertValues = []
sqlInsertKeys = []
sqlInsertKeys.append(('UserId', userIds[0]))
sqlInsertKeys.append(('GroupId', userIds[1]))
sqlInsertKeys.append(('VOId', userIds[2]))
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
sqlInsertKeys.append(('Profile', sqlProfileName))
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlInsertKeys.append(('VarName', sqlVarName))
result = self._escapeString(data)
if not result['OK']:
return result
sqlInsertValues.append(('Data', result['Value']))
normPerms = self.__parsePerms(perms)
for k in normPerms:
sqlInsertValues.append((k, '"%s"' % normPerms[k]))
sqlInsert = sqlInsertKeys + sqlInsertValues
insertSQL = "INSERT INTO `up_ProfilesData` ( %s ) VALUES ( %s )" % (", ".join([f[0] for f in sqlInsert]),
", ".join([str(f[1]) for f in sqlInsert]))
result = self._update(insertSQL)
if result['OK']:
return result
# If error and not duplicate -> real error
if result['Message'].find("Duplicate entry") == -1:
return result
updateSQL = "UPDATE `up_ProfilesData` SET %s WHERE %s" % (", ".join(["%s=%s" % f for f in sqlInsertValues]),
self.__webProfileUserDataCond(userIds,
sqlProfileName,
sqlVarName))
return self._update(updateSQL)
def setUserVarPermsById(self, userIds, profileName, varName, perms):
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
nPerms = self.__parsePerms(perms, False)
if not nPerms:
return S_OK()
sqlPerms = ",".join(["%s='%s'" % (k, nPerms[k]) for k in nPerms])
updateSql = "UPDATE `up_ProfilesData` SET %s WHERE %s" % (sqlPerms,
self.__webProfileUserDataCond(userIds,
sqlProfileName,
sqlVarName))
return self._update(updateSql)
def retrieveVar(self, userName, userGroup, ownerName, ownerGroup, profileName, varName):
"""
Get a data entry for a profile
"""
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
result = self.getUserGroupIds(ownerName, ownerGroup)
if not result['OK']:
return result
ownerIds = result['Value']
return self.retrieveVarById(userIds, ownerIds, profileName, varName)
def retrieveUserProfiles(self, userName, userGroup):
"""
Helper for getting data
"""
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.retrieveUserProfilesById(userIds)
def retrieveAllUserVars(self, userName, userGroup, profileName):
"""
Helper for getting data
"""
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.retrieveAllUserVarsById(userIds, profileName)
def retrieveVarPerms(self, userName, userGroup, ownerName, ownerGroup, profileName, varName):
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
result = self.getUserGroupIds(ownerName, ownerGroup, False)
if not result['OK']:
return result
ownerIds = result['Value']
return self.retrieveVarPermsById(userIds, ownerIds, profileName, varName)
def setUserVarPerms(self, userName, userGroup, profileName, varName, perms):
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.setUserVarPermsById(userIds, profileName, varName, perms)
def storeVar(self, userName, userGroup, profileName, varName, data, perms=None):
"""
Helper for setting data
"""
try:
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.storeVarByUserId(userIds, profileName, varName, data, perms=perms)
finally:
pass
def deleteVar(self, userName, userGroup, profileName, varName):
"""
Helper for deleting data
"""
try:
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.deleteVarByUserId(userIds, profileName, varName)
finally:
pass
def __profilesCondGenerator(self, value, varType, initialValue=False):
if isinstance(value, basestring):
value = [value]
ids = []
if initialValue:
ids.append(initialValue)
for val in value:
if varType == 'user':
result = self.__getUserId(val, insertIfMissing=False)
elif varType == 'group':
result = self.__getGroupId(val, insertIfMissing=False)
else:
result = self.__getVOId(val, insertIfMissing=False)
if not result['OK']:
continue
ids.append(result['Value'])
if varType == 'user':
fieldName = 'UserId'
elif varType == 'group':
fieldName = 'GroupId'
else:
fieldName = 'VOId'
return "`up_ProfilesData`.%s in ( %s )" % (fieldName, ", ".join([str(iD) for iD in ids]))
def listVarsById(self, userIds, profileName, filterDict=None):
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
sqlCond = ["`up_Users`.Id = `up_ProfilesData`.UserId",
"`up_Groups`.Id = `up_ProfilesData`.GroupId",
"`up_VOs`.Id = `up_ProfilesData`.VOId",
self.__webProfileReadAccessDataCond(userIds, userIds, sqlProfileName)]
if filterDict:
fD = {}
for k in filterDict:
fD[k.lower()] = filterDict[k]
filterDict = fD
for k in ('user', 'group', 'vo'):
if k in filterDict:
sqlCond.append(self.__profilesCondGenerator(filterDict[k], k))
sqlVars2Get = ["`up_Users`.UserName", "`up_Groups`.UserGroup", "`up_VOs`.VO", "`up_ProfilesData`.VarName"]
sqlQuery = "SELECT %s FROM `up_Users`, `up_Groups`, `up_VOs`, `up_ProfilesData` WHERE %s" % (", ".join(sqlVars2Get),
" AND ".join(sqlCond))
return self._query(sqlQuery)
def listVars(self, userName, userGroup, profileName, filterDict=None):
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.listVarsById(userIds, profileName, filterDict)
def storeHashTagById(self, userIds, tagName, hashTag=False):
"""
Set a data entry for a profile
"""
if not hashTag:
hashTag = hashlib.md5()
hashTag.update("%s;%s;%s" % (Time.dateTime(), userIds, tagName))
hashTag = hashTag.hexdigest()
result = self.insertFields('up_HashTags', ['UserId', 'GroupId', 'VOId', 'TagName', 'HashTag'],
[userIds[0], userIds[1], userIds[2], tagName, hashTag])
if result['OK']:
return S_OK(hashTag)
# If error and not duplicate -> real error
if result['Message'].find("Duplicate entry") == -1:
return result
result = self.updateFields('up_HashTags', ['HashTag'], [hashTag], {'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'TagName': tagName})
if not result['OK']:
return result
return S_OK(hashTag)
def retrieveHashTagById(self, userIds, hashTag):
"""
Get a data entry for a profile
"""
result = self.getFields('up_HashTags', ['TagName'], {'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'HashTag': hashTag})
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
return S_OK(data[0][0])
return S_ERROR("No data for combo userId %s hashTag %s" % (userIds, hashTag))
def retrieveAllHashTagsById(self, userIds):
"""
Get a data entry for a profile
"""
result = self.getFields('up_HashTags', ['HashTag', 'TagName'], {'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2]})
if not result['OK']:
return result
data = result['Value']
return S_OK(dict(data))
def storeHashTag(self, userName, userGroup, tagName, hashTag=False):
"""
Helper for storing HASH
"""
try:
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.storeHashTagById(userIds, tagName, hashTag)
finally:
pass
def retrieveHashTag(self, userName, userGroup, hashTag):
"""
Helper for retrieving HASH
"""
try:
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.retrieveHashTagById(userIds, hashTag)
finally:
pass
def retrieveAllHashTags(self, userName, userGroup):
"""
Helper for retrieving HASH
"""
try:
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.retrieveAllHashTagsById(userIds)
finally:
pass
  def getUserProfileNames(self, permission):
    """
    Return the distinct profile names whose ReadAccess/PublishAccess settings match the given permission dict
    """
result = None
permissions = self.__parsePerms(permission, False)
if not permissions:
return S_OK()
condition = ",".join(["%s='%s'" % (k, permissions[k]) for k in permissions])
query = "SELECT distinct Profile from `up_ProfilesData` where %s" % condition
retVal = self._query(query)
if retVal['OK']:
result = S_OK([i[0] for i in retVal['Value']])
else:
result = retVal
return result
def testUserProfileDB():
""" Some test cases
"""
# building up some fake CS values
gConfig.setOptionValue('DIRAC/Setup', 'Test')
gConfig.setOptionValue('/DIRAC/Setups/Test/Framework', 'Test')
host = '127.0.0.1'
user = 'Dirac'
pwd = 'Dirac'
db = 'AccountingDB'
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/Host', host)
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/DBName', db)
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/User', user)
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/Password', pwd)
db = UserProfileDB()
assert db._connect()['OK']
userName = 'testUser'
userGroup = 'testGroup'
profileName = 'testProfile'
varName = 'testVar'
tagName = 'testTag'
hashTag = '237cadc4af90277e9524e6386e264630'
data = 'testData'
perms = 'USER'
try:
if False:
for tableName in db.tableDict.keys():
result = db._update('DROP TABLE `%s`' % tableName)
assert result['OK']
gLogger.info('\n Creating Table\n')
# Make sure it is there and it has been created for this test
result = db._checkTable()
assert result == {'OK': True, 'Value': None}
result = db._checkTable()
assert result == {'OK': True, 'Value': 0}
gLogger.info('\n Adding some data\n')
result = db.storeVar(userName, userGroup, profileName, varName, data, perms)
assert result['OK']
assert result['Value'] == 1
gLogger.info('\n Some queries\n')
result = db.getUserGroupIds(userName, userGroup)
assert result['OK']
assert result['Value'] == (1, 1, 1)
result = db.listVars(userName, userGroup, profileName)
assert result['OK']
assert result['Value'][0][3] == varName
result = db.retrieveUserProfiles(userName, userGroup)
assert result['OK']
assert result['Value'] == {profileName: {varName: data}}
result = db.storeHashTag(userName, userGroup, tagName, hashTag)
assert result['OK']
assert result['Value'] == hashTag
result = db.retrieveAllHashTags(userName, userGroup)
assert result['OK']
assert result['Value'] == {hashTag: tagName}
result = db.retrieveHashTag(userName, userGroup, hashTag)
assert result['OK']
assert result['Value'] == tagName
gLogger.info('\n OK\n')
except AssertionError:
print('ERROR ', end=' ')
if not result['OK']:
print(result['Message'])
else:
print(result)
sys.exit(1)
if __name__ == '__main__':
from DIRAC.Core.Base import Script
Script.parseCommandLine()
gLogger.setLevel('VERBOSE')
if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
    gLogger.info('Unset python optimization "PYTHONOPTIMIZE"')
sys.exit(0)
testUserProfileDB()
|
fstagni/DIRAC
|
FrameworkSystem/DB/UserProfileDB.py
|
Python
|
gpl-3.0
| 26,525
|
import argparse
import os
from config import *
def main():
parser = argparse.ArgumentParser(prog=os.path.basename(__file__))
globals().update(load_config(parser))
parser.add_argument('--dataset', choices=datasets, required=False)
args = parser.parse_args()
# override default values
if args.dataset:
selected_datasets = [args.dataset]
else:
selected_datasets = datasets
for d in selected_datasets:
for m in methods:
experiment_name = '%s.%s' % (d, m)
command = "qsub -N %s -l q=compute %s/scripts/default_experiment.sh %s %s" % (
experiment_name, os.environ['AUTOWEKA_PATH'], d, m)
print(command)
os.system(command)
if __name__ == "__main__":
main()
|
dsibournemouth/autoweka
|
scripts/launch_default_experiments.py
|
Python
|
gpl-3.0
| 782
|
# -*- coding: utf-8 -*-
import system_tests
class TestFuzzedPoC(metaclass=system_tests.CaseMeta):
url = [
"https://github.com/Exiv2/exiv2/issues/210",
"https://github.com/Exiv2/exiv2/issues/209"
]
filename = system_tests.path("$data_path/2018-01-09-exiv2-crash-002.tiff")
commands = [
"$exiv2 -pR $filename",
"$exiv2 -pS $filename",
"$exiv2 $filename"
]
retval = [1, 1, 0]
compare_stderr = system_tests.check_no_ASAN_UBSAN_errors
def compare_stdout(self, i, command, got_stdout, expected_stdout):
""" We don't care about the stdout, just don't crash """
pass
|
AlienCowEatCake/ImageViewer
|
src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_CVE_2017_17724.py
|
Python
|
gpl-3.0
| 654
|
from .parser import IEMLParser
|
IEMLdev/propositions-restful-server
|
ieml/usl/parser/__init__.py
|
Python
|
gpl-3.0
| 30
|
# Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import bottle
from bottle import get, post, route, static_file, view, HTTPError
import shared_state
import logging
logger = logging.getLogger(__name__)
@route('/unauth')
def login():
return HTTPError(401, 'Unauthorized')
@post('/login')
def login():
"""Authenticate users"""
username = post_get('username')
password = post_get('password')
logger.info("Authentication attempt with username: [{0}]".format(username))
if shared_state.auth.login(username, password):
return "You provided valid credentials"
else:
return HTTPError(401, 'Invalid credentials')
@route('/logout')
def logout():
shared_state.auth.logout(success_redirect='/unauth')
@route('/admin')
@view('admin_page')
def admin():
"""Only admin users can see this"""
shared_state.auth.require(role='admin', fail_redirect='/unauth')
return dict(
current_user=shared_state.auth.current_user,
users=shared_state.auth.list_users(),
roles=shared_state.auth.list_roles()
)
@post('/create_user')
def create_user():
try:
shared_state.auth.create_user(postd().username, postd().role, postd().password)
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@post('/delete_user')
def delete_user():
try:
shared_state.auth.delete_user(post_get('username'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@post('/create_role')
def create_role():
try:
shared_state.auth.create_role(post_get('role'), post_get('level'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@post('/delete_role')
def delete_role():
try:
shared_state.auth.delete_role(post_get('role'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
def postd():
return bottle.request.forms
def post_get(name, default=''):
return bottle.request.POST.get(name, default).strip()
|
threatstream/mnemosyne
|
webapi/admin.py
|
Python
|
gpl-3.0
| 2,846
|
__author__ = 'sushil'
class InvalidDateFormat(BaseException):
pass
class DateOutOfRange(BaseException):
pass
class InvalidDate(BaseException):
pass
|
ayys/siya
|
pyBSDate/cexceptions.py
|
Python
|
gpl-3.0
| 162
|
"""Example of how to convert a RayTransform operator to a tensorflow layer.
This example is similar to ``tensorflow_layer_matrix``, but demonstrates how
more advanced operators, such as a ray transform, can be handled.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
import odl
import odl.contrib.tensorflow
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
space = odl.uniform_discr([-64, -64], [64, 64], [128, 128],
dtype='float32')
geometry = odl.tomo.parallel_beam_geometry(space)
ray_transform = odl.tomo.RayTransform(space, geometry)
x = tf.constant(np.asarray(ray_transform.domain.one()))
z = tf.constant(np.asarray(ray_transform.range.one()))
# Create tensorflow layer from odl operator
odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
ray_transform, 'RayTransform')
# Add empty axes for batch and channel
x_reshaped = x[None, ..., None]
z_reshaped = z[None, ..., None]
# Lazily apply operator in tensorflow
y = odl_op_layer(x_reshaped)
# Evaluate using tensorflow
print(y.eval())
# Compare result with pure ODL
print(ray_transform(x.eval()))
# Evaluate the adjoint of the derivative, called gradient in tensorflow
# We need to scale by cell size to get correct value since the derivative
# in tensorflow uses unweighted spaces.
scale = ray_transform.range.cell_volume / ray_transform.domain.cell_volume
print(tf.gradients(y, [x_reshaped], z_reshaped)[0].eval() * scale)
# Compare result with pure ODL
print(ray_transform.derivative(x.eval()).adjoint(z.eval()))
|
kohr-h/odl
|
odl/contrib/tensorflow/examples/tensorflow_layer_ray_transform.py
|
Python
|
mpl-2.0
| 1,582
|
#!/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
class LoginRequiredMixin(object):
""" This works: class InterviewListView(LoginRequiredMixin, ListView)
This DOES NOT work: class InterviewListView(ListView, LoginRequiredMixin)
I'm not 100% sure that wrapping as_view function using Mixin is a good idea though, but whatever
"""
@classmethod
def as_view(cls, **initkwargs):
# Ignore PyCharm warning below, this is a Mixin class after all
view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
return login_required(view)
|
ecaldwe1/zika
|
website/mixins.py
|
Python
|
mpl-2.0
| 1,140
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import json
import sys
from os.path import join
from hashlib import md5
from tempfile import NamedTemporaryFile
from flask import request
import superdesk
from superdesk.io.registry import register_feeding_service, register_feeding_service_parser
from superdesk.io.feeding_services import FeedingService
from superdesk.io.commands import update_ingest
from superdesk.io import webhooks
from superdesk.errors import IngestApiError, SuperdeskIngestError
logger = logging.getLogger(__name__)
EVENT_UNPUBLISHED = "newsitem.unpublished"
class NewsworthyFeedingServiceAuth(webhooks.FeedingServiceWebhookAuth):
# FIXME: we can't use eve.auth.BasicAuth because eve uses flask.g.user
# internally and superdesk.audit too, resulting in conflict
def authorized(self, allowed_roles, resource, method):
"""Check that webhook data correspond to a known ingest provider
the provider is then registered in the service which will handle the update
"""
username = request.authorization.get("username")
password = request.authorization.get("password")
lookup = {'feeding_service': NewsworthyFeedingService.NAME}
        # if at least one provider matches this request, we return True
found_provider = False
for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup=lookup):
config = provider['config']
if config['username'] == username and config['password'] == password:
secret = (config['secret'] or '').encode('utf-8')
checksum = md5(json.dumps(request.json).encode('utf-8') + secret).hexdigest()
if sys.version_info[:2] <= (3, 5):
# dict was not keeping order before Python 3.6, so the checksum would be random
# FIXME: to be dropped when Python 3.5 support will be dropped
logger.warning("Checksum validation disabled on this Python version, please upgrade to Python 3.6+")
checksum = request.args['checksum']
if checksum != request.args['checksum']:
logger.warning(
"invalid checksum in newsworthy hook for provider {provider_id} (our checksum: {checksum}, "
"given checksum: {given_checksum}), skipping".format(
provider_id=str(provider['_id']),
checksum=checksum,
given_checksum=request.args['checksum']))
else:
NewsworthyWebhookService.requests_map.setdefault(request, []).append(provider)
found_provider = True
return found_provider
class NewsworthyWebhookResource(webhooks.FeedingServiceWebhookResource):
authentication = NewsworthyFeedingServiceAuth
class NewsworthyWebhookService(webhooks.FeedingServiceWebhookService):
requests_map = {}
def trigger_provider(self):
"""Update all provider found by NewsworthyFeedingServiceAuth"""
try:
providers = self.requests_map.pop(request)
except KeyError:
logger.error("Internal error, missing request mapping")
return
for provider in providers:
provider['newsworthy_data'] = request.json
kwargs = {
'provider': provider,
'rule_set': update_ingest.get_provider_rule_set(provider),
'routing_scheme': update_ingest.get_provider_routing_scheme(provider)
}
update_ingest.update_provider.apply_async(
expires=update_ingest.get_task_ttl(provider), kwargs=kwargs)
class NewsworthyFeedingService(FeedingService):
"""
Feeding Service class which can retrieve articles from Newsworthy web service
"""
NAME = 'newsworthy'
ERRORS = [IngestApiError.apiRequestError().get_error_description(),
SuperdeskIngestError.notConfiguredError().get_error_description()]
label = 'Newsworthy'
fields = [
{
'id': 'url', 'type': 'text', 'label': 'Use this URL for webhook',
'default_value': '',
'readonly': True,
},
{
'id': 'username', 'type': 'text', 'label': 'Username',
'required': True
},
{
'id': 'password', 'type': 'password', 'label': 'Password',
'required': True
},
{
'id': 'secret', 'type': 'password', 'label': 'Shared Secret',
'placeholder': 'Shared Secret', 'required': False
},
]
def _update(self, provider, update):
try:
data = provider['newsworthy_data']
        except KeyError:
return [[]]
if data['hook']['event'] == EVENT_UNPUBLISHED:
logger.info("ignoring unpublish event on following data:\n{data}".format(data=data))
return [[]]
        # we have to write to a temporary file because the feed parser expects a file path
# FIXME: it would be better to use the data directly
with NamedTemporaryFile('w') as f:
json.dump(data['data'], f)
f.seek(0)
parser = self.get_feed_parser(provider, f.name)
items = parser.parse(f.name, provider)
return [items]
def init_app(app):
# we have to set URL field here, because config is not available at time
# of parsing
url = join(app.config['SERVER_URL'], 'newsworthy')
url_field = NewsworthyFeedingService.fields[0]
assert url_field['id'] == 'url'
url_field['default_value'] = url
# init_app can be called several times during tests
# so we skip registration if we have an AlreadyExistsError
try:
register_feeding_service(NewsworthyFeedingService)
except superdesk.errors.AlreadyExistsError:
pass
else:
register_feeding_service_parser(NewsworthyFeedingService.NAME, 'ninjs')
service = NewsworthyWebhookService()
resource = NewsworthyWebhookResource("newsworthy", app=app, service=service)
resource.authentication = NewsworthyFeedingServiceAuth
|
petrjasek/superdesk-ntb
|
server/ntb/io/feeding_services/newsworthy.py
|
Python
|
agpl-3.0
| 6,489
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask import Flask, request
import json
import datetime
from ConfigParser import ConfigParser
import zmq
from monitor_kraken import request_pb2
from monitor_kraken import response_pb2
from monitor_kraken import type_pb2
app = Flask(__name__)
app.config.from_object('monitor_kraken.default_settings')
app.config.from_envvar('MONITOR_CONFIG_FILE', silent=True)
context = zmq.Context()
@app.route('/')
def monitor():
if 'instance' not in request.args:
return json.dumps({'error': ['instance invalid']}), 400
instance = request.args['instance']
config_file = '{path}/{instance}/kraken.ini'.format(
path=app.config['KRAKEN_DIR'],
instance=instance)
parser = ConfigParser()
parser.read(config_file)
try:
uri = parser.get('GENERAL', 'zmq_socket')
except:
return json.dumps({'error': ['instance invalid']}), 500
uri = uri.replace('*', 'localhost')
sock = context.socket(zmq.REQ)
# discard messages when socket closed
sock.setsockopt(zmq.LINGER, 0)
try:
sock.connect(uri)
req = request_pb2.Request()
req.requested_api = type_pb2.STATUS
sock.send(req.SerializeToString())
if sock.poll(app.config['TIMEOUT']) < 1:
return json.dumps({'status': 'timeout'}), 503
pb = sock.recv()
resp = response_pb2.Response()
resp.ParseFromString(pb)
response = {}
return_code = 200
if resp.error and resp.error.message:
response['status'] = resp.error.message
response['start_production_date'] = resp.status.start_production_date
response['end_production_date'] = resp.status.end_production_date
response['last_load'] = resp.status.last_load_at
response['last_load_status'] = resp.status.last_load_status
response['loaded'] = resp.status.loaded
response['is_connected_to_rabbitmq'] = resp.status.is_connected_to_rabbitmq
response['publication_date'] = resp.status.publication_date
if resp.status.last_load_status == False and 'status' not in response:
response['status'] = 'last load failed'
if 'status' not in response:
response['status'] = 'running'
else:
return_code = 503
return json.dumps(response), return_code
finally:
sock.close()
if __name__ == '__main__':
app.run()
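# A minimal usage sketch (host, port and the instance name are illustrative;
# the port is whatever Flask is configured to serve on):
#
#   curl "http://localhost:5000/?instance=default"
#   # -> HTTP 200 with {"status": "running", "loaded": true, ...}
#   # -> HTTP 400 with {"error": ["instance invalid"]} if 'instance' is missing
#   # -> HTTP 503 with {"status": "timeout"} if kraken does not answer in time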
|
is06/navitia
|
source/monitor/monitor_kraken/app.py
|
Python
|
agpl-3.0
| 3,672
|
from django.db import models
# Create your models here.
from core.models import BillingSchedule
from core.models import Bill
from core.models import Account
from core.models import FileRepo
from core.models import Config
from core.models import Task
|
tombs/Water-Billing-System
|
waterbilling/tasks/models.py
|
Python
|
agpl-3.0
| 250
|
"""
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
import logging
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from django.contrib.auth.models import User
from opaque_keys.edx.django.models import CourseKeyField
from openedx.core.lib.cache_utils import get_cache
from student.models import CourseAccessRole
log = logging.getLogger(__name__)
# A list of registered access roles.
REGISTERED_ACCESS_ROLES = {}
def register_access_role(cls):
"""
Decorator that allows access roles to be registered within the roles module and referenced by their
string values.
Assumes that the decorated class has a "ROLE" attribute, defining its type.
"""
try:
role_name = cls.ROLE
REGISTERED_ACCESS_ROLES[role_name] = cls
except AttributeError:
log.exception(u"Unable to register Access Role with attribute 'ROLE'.")
return cls
class BulkRoleCache(object):
CACHE_NAMESPACE = u"student.roles.BulkRoleCache"
CACHE_KEY = u'roles_by_user'
@classmethod
def prefetch(cls, users):
roles_by_user = defaultdict(set)
get_cache(cls.CACHE_NAMESPACE)[cls.CACHE_KEY] = roles_by_user
for role in CourseAccessRole.objects.filter(user__in=users).select_related('user'):
roles_by_user[role.user.id].add(role)
users_without_roles = filter(lambda u: u.id not in roles_by_user, users)
for user in users_without_roles:
roles_by_user[user.id] = set()
@classmethod
def get_user_roles(cls, user):
return get_cache(cls.CACHE_NAMESPACE)[cls.CACHE_KEY][user.id]
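# A minimal usage sketch (the queryset and course_key below are hypothetical):
# prefetch the access roles for a batch of users once, so that the RoleCache
# lookups made by has_user() read from the request cache instead of querying
# CourseAccessRole once per user:
#
#   users = list(User.objects.filter(is_active=True)[:100])
#   BulkRoleCache.prefetch(users)
#   staff = [u for u in users if CourseStaffRole(course_key).has_user(u)]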
class RoleCache(object):
"""
A cache of the CourseAccessRoles held by a particular user
"""
def __init__(self, user):
try:
self._roles = BulkRoleCache.get_user_roles(user)
except KeyError:
self._roles = set(
CourseAccessRole.objects.filter(user=user).all()
)
def has_role(self, role, course_id, org):
"""
Return whether this RoleCache contains a role with the specified role, course_id, and org
"""
return any(
access_role.role == role and
access_role.course_id == course_id and
access_role.org == org
for access_role in self._roles
)
class AccessRole(object):
"""
Object representing a role with particular access to a resource
"""
__metaclass__ = ABCMeta
@abstractmethod
def has_user(self, user):
"""
Return whether the supplied django user has access to this role.
"""
return False
@abstractmethod
def add_users(self, *users):
"""
Add the role to the supplied django users.
"""
pass
@abstractmethod
def remove_users(self, *users):
"""
Remove the role from the supplied django users.
"""
pass
@abstractmethod
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
return User.objects.none()
class GlobalStaff(AccessRole):
"""
The global staff role
"""
def has_user(self, user):
return user.is_staff
def add_users(self, *users):
for user in users:
if user.is_authenticated and user.is_active:
user.is_staff = True
user.save()
def remove_users(self, *users):
for user in users:
# don't check is_authenticated nor is_active on purpose
user.is_staff = False
user.save()
def users_with_role(self):
raise Exception("This operation is un-indexed, and shouldn't be used")
class RoleBase(AccessRole):
"""
Roles by type (e.g., instructor, beta_user) and optionally org, course_key
"""
def __init__(self, role_name, org='', course_key=None):
"""
Create role from required role_name w/ optional org and course_key. You may just provide a role
name if it's a global role (not constrained to an org or course). Provide org if constrained to
an org. Provide org and course if constrained to a course. Although, you should use the subclasses
for all of these.
"""
super(RoleBase, self).__init__()
self.org = org
self.course_key = course_key
self._role_name = role_name
# pylint: disable=arguments-differ
def has_user(self, user, check_user_activation=True):
"""
Check if the supplied django user has access to this role.
Arguments:
user: user to check against access to role
check_user_activation: Indicating whether or not we need to check
user activation while checking user roles
Return:
bool identifying if user has that particular role or not
"""
if check_user_activation and not (user.is_authenticated and user.is_active):
return False
# pylint: disable=protected-access
if not hasattr(user, '_roles'):
# Cache a list of tuples identifying the particular roles that a user has
# Stored as tuples, rather than django models, to make it cheaper to construct objects for comparison
user._roles = RoleCache(user)
return user._roles.has_role(self._role_name, self.course_key, self.org)
def add_users(self, *users):
"""
Add the supplied django users to this role.
"""
# silently ignores anonymous and inactive users so that any that are
# legit get updated.
from student.models import CourseAccessRole
for user in users:
if user.is_authenticated and user.is_active and not self.has_user(user):
entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org)
entry.save()
if hasattr(user, '_roles'):
del user._roles
def remove_users(self, *users):
"""
Remove the supplied django users from this role.
"""
entries = CourseAccessRole.objects.filter(
user__in=users, role=self._role_name, org=self.org, course_id=self.course_key
)
entries.delete()
for user in users:
if hasattr(user, '_roles'):
del user._roles
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
# Org roles don't query by CourseKey, so use CourseKeyField.Empty for that query
if self.course_key is None:
self.course_key = CourseKeyField.Empty
entries = User.objects.filter(
courseaccessrole__role=self._role_name,
courseaccessrole__org=self.org,
courseaccessrole__course_id=self.course_key
)
return entries
class CourseRole(RoleBase):
"""
A named role in a particular course
"""
def __init__(self, role, course_key):
"""
Args:
course_key (CourseKey)
"""
super(CourseRole, self).__init__(role, course_key.org, course_key)
@classmethod
def course_group_already_exists(self, course_key):
return CourseAccessRole.objects.filter(org=course_key.org, course_id=course_key).exists()
def __repr__(self):
return '<{}: course_key={}>'.format(self.__class__.__name__, self.course_key)
class OrgRole(RoleBase):
"""
A named role in a particular org independent of course
"""
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
@register_access_role
class CourseStaffRole(CourseRole):
"""A Staff member of a course"""
ROLE = 'staff'
def __init__(self, *args, **kwargs):
super(CourseStaffRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseInstructorRole(CourseRole):
"""A course Instructor"""
ROLE = 'instructor'
def __init__(self, *args, **kwargs):
super(CourseInstructorRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseFinanceAdminRole(CourseRole):
"""A course staff member with privileges to review financial data."""
ROLE = 'finance_admin'
def __init__(self, *args, **kwargs):
super(CourseFinanceAdminRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseSalesAdminRole(CourseRole):
"""A course staff member with privileges to perform sales operations. """
ROLE = 'sales_admin'
def __init__(self, *args, **kwargs):
super(CourseSalesAdminRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseBetaTesterRole(CourseRole):
"""A course Beta Tester"""
ROLE = 'beta_testers'
def __init__(self, *args, **kwargs):
super(CourseBetaTesterRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class LibraryUserRole(CourseRole):
"""
A user who can view a library and import content from it, but not edit it.
Used in Studio only.
"""
ROLE = 'library_user'
def __init__(self, *args, **kwargs):
super(LibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)
class CourseCcxCoachRole(CourseRole):
"""A CCX Coach"""
ROLE = 'ccx_coach'
def __init__(self, *args, **kwargs):
super(CourseCcxCoachRole, self).__init__(self.ROLE, *args, **kwargs)
class OrgStaffRole(OrgRole):
"""An organization staff member"""
def __init__(self, *args, **kwargs):
super(OrgStaffRole, self).__init__('staff', *args, **kwargs)
class OrgInstructorRole(OrgRole):
"""An organization instructor"""
def __init__(self, *args, **kwargs):
super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)
class OrgLibraryUserRole(OrgRole):
"""
A user who can view any libraries in an org and import content from them, but not edit them.
Used in Studio only.
"""
ROLE = LibraryUserRole.ROLE
def __init__(self, *args, **kwargs):
super(OrgLibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseCreatorRole(RoleBase):
"""
This is the group of people who have permission to create new courses (we may want to eventually
make this an org based role).
"""
ROLE = "course_creator_group"
def __init__(self, *args, **kwargs):
super(CourseCreatorRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class SupportStaffRole(RoleBase):
"""
Student support team members.
"""
ROLE = "support"
def __init__(self, *args, **kwargs):
super(SupportStaffRole, self).__init__(self.ROLE, *args, **kwargs)
class UserBasedRole(object):
"""
Backward mapping: given a user, manipulate the courses and roles
"""
def __init__(self, user, role):
"""
Create a UserBasedRole accessor: for a given user and role (e.g., "instructor")
"""
self.user = user
self.role = role
def has_course(self, course_key):
"""
Return whether the role's user has the configured role access to the passed course
"""
if not (self.user.is_authenticated and self.user.is_active):
return False
# pylint: disable=protected-access
if not hasattr(self.user, '_roles'):
self.user._roles = RoleCache(self.user)
return self.user._roles.has_role(self.role, course_key, course_key.org)
def add_course(self, *course_keys):
"""
Grant this object's user the object's role for the supplied courses
"""
if self.user.is_authenticated and self.user.is_active:
for course_key in course_keys:
entry = CourseAccessRole(user=self.user, role=self.role, course_id=course_key, org=course_key.org)
entry.save()
if hasattr(self.user, '_roles'):
del self.user._roles
else:
raise ValueError("user is not active. Cannot grant access to courses")
def remove_courses(self, *course_keys):
"""
Remove the supplied courses from this user's configured role.
"""
entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)
entries.delete()
if hasattr(self.user, '_roles'):
del self.user._roles
def courses_with_role(self):
"""
Return a django QuerySet for all of the courses with this user x role. You can access
any of these properties on each result record:
* user (will be self.user--thus uninteresting)
* org
* course_id
* role (will be self.role--thus uninteresting)
"""
return CourseAccessRole.objects.filter(role=self.role, user=self.user)
|
ahmedaljazzar/edx-platform
|
common/djangoapps/student/roles.py
|
Python
|
agpl-3.0
| 13,043
|
# -*- coding: utf-8 -*-
"""WebUI."""
from .websocket import WebsocketProxyHandler
def create_webapp(naumanni, **kwargs):
"""App factory.
    :param naumanni: Naumanni core object
    :param str base_url: base URL
    :param int ws_port: Websocket port number
    :return: WebUI App
    :rtype: NaumanniWebApp
"""
from .app import NaumanniWebApp
app = NaumanniWebApp(naumanni, **kwargs)
return app
|
glucoseinc/naumanni-server
|
naumanni/web/__init__.py
|
Python
|
agpl-3.0
| 422
|
#!/usr/bin/env python
import os
import sys
import dotenv
dotenv.read_dotenv()
if __name__ == "__main__":
ENVIRONMENT = os.getenv('ENVIRONMENT')
if ENVIRONMENT == 'STAGING':
settings = 'staging'
elif ENVIRONMENT == 'PRODUCTION':
settings = 'production'
else:
settings = 'development'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blimp_boards.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', settings.title())
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
GetBlimp/boards-backend
|
manage.py
|
Python
|
agpl-3.0
| 584
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Report to printer - Paper tray selection',
'version': '8.0.1.0.1',
'category': 'Printer',
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'website': 'http://www.camptocamp.com/',
'license': 'AGPL-3',
'depends': ['base_report_to_printer',
],
'data': [
'users_view.xml',
'ir_report_view.xml',
'printer_view.xml',
'report_xml_action_view.xml',
'security/ir.model.access.csv',
],
'external_dependencies': {
'python': ['cups'],
},
'installable': True,
'auto_install': False,
'application': True,
}
|
rosenvladimirov/addons
|
printer_tray/__openerp__.py
|
Python
|
agpl-3.0
| 1,538
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('video_pipeline', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='videopipelineintegration',
name='client_name',
field=models.CharField(default=b'VEDA-Prod', help_text='Oauth client name of video pipeline service.', max_length=100),
),
migrations.AlterField(
model_name='videopipelineintegration',
name='service_username',
field=models.CharField(default=b'veda_service_user', help_text='Username created for Video Pipeline Integration, e.g. veda_service_user.', max_length=100),
),
]
|
ESOedX/edx-platform
|
openedx/core/djangoapps/video_pipeline/migrations/0002_auto_20171114_0704.py
|
Python
|
agpl-3.0
| 837
|
#!/usr/bin/python
import os
import selinux
import tempfile
import unittest
import blivet
from tests import loopbackedtestcase
import blivet.formats.fs as fs
from blivet.size import Size
@unittest.skipUnless(selinux.is_selinux_enabled() == 1, "SELinux is disabled")
class SELinuxContextTestCase(loopbackedtestcase.LoopBackedTestCase):
"""Testing SELinux contexts.
"""
def __init__(self, methodName='runTest'):
super(SELinuxContextTestCase, self).__init__(methodName=methodName, deviceSpec=[Size("100 MiB")])
def testMountingExt2FS(self):
""" Test that lost+found directory gets assigned correct SELinux
context if installer_mode is True, and retains some random old
context if installer_mode is False.
"""
LOST_AND_FOUND_CONTEXT = 'system_u:object_r:lost_found_t:s0'
an_fs = fs.Ext2FS(device=self.loopDevices[0], label="test")
self.assertIsNone(an_fs.create())
blivet.flags.installer_mode = False
mountpoint = tempfile.mkdtemp("test.selinux")
an_fs.mount(mountpoint=mountpoint)
lost_and_found = os.path.join(mountpoint, "lost+found")
self.assertTrue(os.path.exists(lost_and_found))
lost_and_found_selinux_context = selinux.getfilecon(lost_and_found)
an_fs.unmount()
os.rmdir(mountpoint)
self.assertNotEqual(lost_and_found_selinux_context[1], LOST_AND_FOUND_CONTEXT)
blivet.flags.installer_mode = True
mountpoint = tempfile.mkdtemp("test.selinux")
an_fs.mount(mountpoint=mountpoint)
lost_and_found = os.path.join(mountpoint, "lost+found")
self.assertTrue(os.path.exists(lost_and_found))
lost_and_found_selinux_context = selinux.getfilecon(lost_and_found)
an_fs.unmount()
os.rmdir(mountpoint)
self.assertEqual(lost_and_found_selinux_context[1], LOST_AND_FOUND_CONTEXT)
def testMountingXFS(self):
""" XFS does not have a lost+found directory. """
an_fs = fs.XFS(device=self.loopDevices[0], label="test")
self.assertIsNone(an_fs.create())
blivet.flags.installer_mode = False
mountpoint = tempfile.mkdtemp("test.selinux")
an_fs.mount(mountpoint=mountpoint)
lost_and_found = os.path.join(mountpoint, "lost+found")
self.assertFalse(os.path.exists(lost_and_found))
an_fs.unmount()
os.rmdir(mountpoint)
blivet.flags.installer_mode = True
mountpoint = tempfile.mkdtemp("test.selinux")
an_fs.mount(mountpoint=mountpoint)
lost_and_found = os.path.join(mountpoint, "lost+found")
self.assertFalse(os.path.exists(lost_and_found))
an_fs.unmount()
os.rmdir(mountpoint)
if __name__ == "__main__":
unittest.main()
|
vojtechtrefny/blivet
|
tests/formats_test/selinux_test.py
|
Python
|
lgpl-2.1
| 2,792
|
# -*- coding: utf-8 -*-
# Copyright(C) 2018 Phyks
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import itertools
from collections import Counter
from weboob.capabilities.base import empty
from weboob.capabilities.housing import POSTS_TYPES
class HousingTest(object):
"""
Testing class to standardize the housing modules tests.
"""
# Fields to be checked for values across all items in housings list
FIELDS_ALL_HOUSINGS_LIST = [
"id", "type", "advert_type", "house_type", "url", "title", "area",
"cost", "currency", "utilities", "date", "location", "station", "text",
"phone", "rooms", "bedrooms", "DPE", "GES", "details"
]
# Fields to be checked for at least one item in housings list
FIELDS_ANY_HOUSINGS_LIST = [
"photos"
]
# Fields to be checked for values across all items when querying
# individually
FIELDS_ALL_SINGLE_HOUSING = [
"id", "url", "type", "advert_type", "house_type", "title", "area",
"cost", "currency", "utilities", "date", "location", "station", "text",
"phone", "rooms", "bedrooms", "DPE", "GES", "details"
]
# Fields to be checked for values at least once for all items when querying
# individually
FIELDS_ANY_SINGLE_HOUSING = [
"photos"
]
# Some backends cannot distinguish between rent and furnished rent for
# single housing post. Set this to True if this is the case.
DO_NOT_DISTINGUISH_FURNISHED_RENT = False
def assertNotEmpty(self, obj, field):
self.assertFalse(
empty(getattr(obj, field)),
'Field "%s" is empty and should not be.' % field
)
def check_housing_lists(self, query):
results = list(itertools.islice(
self.backend.search_housings(query),
20
))
self.assertGreater(len(results), 0)
for field in self.FIELDS_ANY_HOUSINGS_LIST:
self.assertTrue(
any(not empty(getattr(x, field)) for x in results),
'Missing a "%s" field.' % field
)
for x in results:
if 'type' in self.FIELDS_ALL_HOUSINGS_LIST:
self.assertEqual(x.type, query.type)
if 'advert_type' in self.FIELDS_ALL_HOUSINGS_LIST:
self.assertIn(x.advert_type, query.advert_types)
if 'house_type' in self.FIELDS_ALL_HOUSINGS_LIST:
self.assertIn(x.house_type, query.house_types)
for field in self.FIELDS_ALL_HOUSINGS_LIST:
self.assertNotEmpty(x, field)
if not empty(x.cost):
self.assertNotEmpty(x, 'price_per_meter')
for photo in x.photos:
self.assertRegexpMatches(photo.url, r'^http(s?)://')
return results
def check_single_housing_all(self, housing,
type, house_types, advert_type):
for field in self.FIELDS_ALL_SINGLE_HOUSING:
self.assertNotEmpty(housing, field)
if 'type' in self.FIELDS_ALL_SINGLE_HOUSING:
if (
self.DO_NOT_DISTINGUISH_FURNISHED_RENT and
type in [POSTS_TYPES.RENT, POSTS_TYPES.FURNISHED_RENT]
):
self.assertIn(housing.type,
[POSTS_TYPES.RENT, POSTS_TYPES.FURNISHED_RENT])
else:
self.assertEqual(housing.type, type)
if 'house_type' in self.FIELDS_ALL_SINGLE_HOUSING:
if not empty(house_types):
self.assertEqual(housing.house_type, house_types)
else:
self.assertNotEmpty(housing, 'house_type')
if 'advert_type' in self.FIELDS_ALL_SINGLE_HOUSING:
self.assertEqual(housing.advert_type, advert_type)
def check_single_housing_any(self, housing, counter):
for field in self.FIELDS_ANY_SINGLE_HOUSING:
if not empty(getattr(housing, field)):
counter[field] += 1
for photo in housing.photos:
self.assertRegexpMatches(photo.url, r'^http(s?)://')
return counter
def check_against_query(self, query):
# Check housing listing results
results = self.check_housing_lists(query)
# Check mandatory fields in all housings
housing = self.backend.get_housing(results[0].id)
self.backend.fillobj(housing, 'phone') # Fetch phone
self.check_single_housing_all(
housing,
results[0].type,
results[0].house_type,
results[0].advert_type
)
# Check fields that should appear in at least one housing
counter = Counter()
counter = self.check_single_housing_any(housing, counter)
for result in results[1:]:
if all(counter[field] > 0 for field in
self.FIELDS_ANY_SINGLE_HOUSING):
break
housing = self.backend.get_housing(result.id)
self.backend.fillobj(housing, 'phone') # Fetch phone
counter = self.check_single_housing_any(housing, counter)
for field in self.FIELDS_ANY_SINGLE_HOUSING:
self.assertGreater(
counter[field],
0,
'Optional field "%s" should appear at least once.' % field
)
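# A minimal sketch of how a module test could reuse this class; the module
# name, the BackendTest base class import and the query fields are assumptions
# following the usual weboob test layout, not something defined in this file:
#
#   from weboob.capabilities.housing import Query, POSTS_TYPES
#   from weboob.tools.test import BackendTest
#
#   class SomeModuleTest(BackendTest, HousingTest):
#       MODULE = 'somemodule'
#
#       def test_somemodule_rent(self):
#           query = Query()
#           query.type = POSTS_TYPES.RENT
#           # fill in query.cities, query.cost_max, ... as the module requires
#           self.check_against_query(query)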
|
vicnet/weboob
|
weboob/tools/capabilities/housing/housing_test.py
|
Python
|
lgpl-3.0
| 5,981
|
# coding: utf-8
#This file is part of numword. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'''
numword for EN_GB
'''
from numword_en import NumWordEN
class NumWordENGB(NumWordEN):
'''
NumWord EN_GB
'''
def currency(self, val, longval=True):
'''
Convert to currency
'''
return self._split(val, hightxt=u"pound/s", lowtxt=u"pence",
jointxt=u"and", longval=longval)
_NW = NumWordENGB()
def cardinal(value):
'''
Convert to cardinal
'''
return _NW.cardinal(value)
def ordinal(value):
'''
Convert to ordinal
'''
return _NW.ordinal(value)
def ordinal_number(value):
'''
Convert to ordinal number
'''
return _NW.ordinal_number(value)
def currency(value, longval=True):
'''
Convert to currency
'''
return _NW.currency(value, longval=longval)
def year(value, longval=True):
'''
Convert to year
'''
return _NW.year(value, longval=longval)
def main():
'''
Main
'''
for val in [ 1, 11, 12, 21, 31, 33, 71, 80, 81, 91, 99, 100, 101, 102, 120, 155,
180, 300, 308, 832, 1000, 1001, 1061, 1100, 1120, 1500, 1701, 1800,
2000, 2010, 2099, 2171, 3000, 8280, 8291, 150000, 500000, 1000000,
2000000, 2000001, -21212121211221211111, -2.121212, -1.0000100,
1325325436067876801768700107601001012212132143210473207540327057320957032975032975093275093275093270957329057320975093272950730]:
_NW.test(val)
if __name__ == "__main__":
main()
|
soshial/text-normalization
|
numword/numword_en_gb.py
|
Python
|
lgpl-3.0
| 1,631
|
# -*- Mode:Python -*-
##########################################################################
# #
# Guacamole Tree printer #
# #
# Copyright 2014 Janek Bevendorff #
# VR Systems Group Bauhaus University Weimar #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
# USAGE: #
# For integrating the tree printer into your project, just import #
# this module. It will automatically monkey patch itself #
# into your scenegraph and tree node objects. Each of those objects #
# will be extended by a print_tree() and print_fields() method. #
# For a list of possible parameters read the pyDoc block of #
# GuajacumTreePrinter.printTree(). #
# #
##########################################################################
import re
import sys
import avango.gua
class GuajacumTreePrinter():
"""
Recursively print the scene graph or subtrees of certain nodes.
This class will be hooked into avango.gua._gua.SceneGraph and
avango.gua._gua.Node to provide a printTree() method for
SceneGraph and Node objects.
"""
def __init__(self, graph):
self._root = graph
def printTree(self, args):
"""
Print Avango scene graph recursively.
@param args: dict of arguments for the tree generation. Possible keys are:
- int max_depth: reduce maximum tree depth (-1 means full tree traversal)
- str exclude_pattern: regular expression to exclude certain nodes by name
- bool print_full_path: print full path for each node (default: False)
- bool print_depth: print depth in tree for each node (default: False)
- bool shorten_sub_trees: shorten subtrees with more than n child nodes
(-1 means full tree traversal)
- str group_by_name: regular expression for grouping child nodes together
- bool print_memory_addr: show the memory address for each node (default: False)
- bool print_field_names: show field names for each node
- bool print_field_values: show values of fields for each node (implies print_field_names)
@type args: dict
@throws Exception: Invalid tree structure
"""
# check given arguments
for i in list(args.keys()):
if i not in self._treeOpts:
print(self._colorize('error', "Invalid argument '" + i + "'"),
file=sys.stderr)
return
joined_args = dict(list(self._treeOpts.items()) + list(args.items()))
_root = self._root
if hasattr(self._root, 'Root'):
_root = self._root.Root.value
elif hasattr(self._root, 'Children'):
_root = self._root
else:
raise Exception(
"Invalid tree structure, missing attributes 'Root' or 'Children'")
self.__printRecursively(_root, 0, joined_args)
def __printRecursively(self, node, cur_depth, args,
cur_path=[],
is_grouped=False):
# return if current node name matches user-specified exclude pattern
if None != args['exclude_pattern'] and re.search(
args['exclude_pattern'], node.Name.value):
return
# push current basename to path stack
cur_path.append(node.Name.value)
obj_name = str(node)
# remove memory address from string representation if not needed
if not args['print_memory_addr']:
obj_name = re.sub(' object at 0x[0-9a-zA-Z]+>$', '>', obj_name)
print(self._indent(
cur_depth, 'Name: %s%s Obj: %s%s%s' %
(self._colorize('important', '"' + node.Name.value + '"'),
self._colorize('bold', ' (Group)')
if is_grouped else '', self._colorize('important', obj_name, ),
' Path: "' + '/'.join(cur_path).replace('//', '/', 1) + '"'
if args['print_full_path'] else '', ' Depth: ' + str(cur_depth)
if args['print_depth'] else '')))
if (args['print_field_values'] or args['print_field_names']
) and node.get_num_fields():
print(self._indent(cur_depth + 1, self._colorize('bold',
'Fields:')))
num_fields = node.get_num_fields()
for i in range(num_fields):
if args['print_field_values']:
print(self._indent(cur_depth + 2, '%s: %s = %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__,
str(node.get_field(i).value))))
else:
print(self._indent(cur_depth + 2, '%s: %s' %
(node.get_field_name(i),
node.get_field(i).__class__.__name__)))
# if it's a leaf or max_depth is reached, pop current level from path stack and abort recursion
if 0 == len(node.Children.value) or cur_depth == args['max_depth']:
if len(node.Children.value):
print(self._indent(cur_depth + 1, self._colorize(
'bold', 'Node has children...')))
cur_path.pop()
return
counter = 0
used_name_count = 0
for i in node.Children.value:
# group by names if option 'group_by_name' is set
name_matches = False
if None != args['group_by_name'] and re.search(
args['group_by_name'], i.Name.value):
name_matches = True
used_name_count += 1
if 1 != used_name_count:
continue
# cut off sub trees if shorten_sub_trees is set
if -1 < args['shorten_sub_trees'
] and counter >= args['shorten_sub_trees']:
print(self._indent(cur_depth, \
self._colorize('bold', 'Shortened sub tree (' + str(len(node.Children.value) - counter) + ' more...)')))
break
self.__printRecursively(i, cur_depth + 1, args, cur_path,
used_name_count and name_matches)
counter += 1
if 1 < used_name_count:
print(self._indent(cur_depth, self._colorize(
'bold', 'Grouped children: ' + str(used_name_count))))
# go up the tree stack
cur_path.pop()
def _indent(self, depth, text):
"""
Indent a line to a certain depth.
"""
if 0 >= depth:
return text
return '| ' * (depth - 1) + '|___ ' + text
def _colorize(self, color, text):
"""
Wrap text in ANSI escape codes (terminal color codes).
Possible values for color: important, error, bold
"""
color_codes = {
'important': '\033[1;32m',
'error': '\033[1;93m',
'bold': '\033[1m',
'none': '\033[0m',
}
if color not in color_codes or 'none' == color:
return text
return color_codes[color] + text + color_codes['none']
# possible tree formatting user options
_treeOpts = {
'max_depth': -1,
'exclude_pattern': None,
'print_full_path': False,
'print_depth': False,
'shorten_sub_trees': -1,
'group_by_name': None,
'print_memory_addr': False,
'print_field_names': False,
'print_field_values': False,
}
def _printTree(self, **args):
e314 = GuajacumTreePrinter(self)
e314.printTree(args)
def _printFields(self):
e314 = GuajacumTreePrinter(self)
args = {'print_field_values': True, 'max_depth': 0}
e314.printTree(args)
# now put some antioxidant on our guacamole
avango.gua._gua.SceneGraph.print_tree = _printTree
avango.gua._gua.Node.print_tree = _printTree
avango.gua._gua.SceneGraph.print_fields = _printFields
avango.gua._gua.Node.print_fields = _printFields
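# A minimal usage sketch (the scene graph construction is assumed application
# code; node names and option values are purely illustrative):
#
#   import avango.gua
#   import _guajacum  # importing is enough to monkey patch SceneGraph and Node
#
#   graph = avango.gua.nodes.SceneGraph(Name="scenegraph")
#   graph.print_tree(max_depth=2, print_full_path=True)
#   graph.Root.value.print_fields()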
|
vrsys/avango
|
avango-utils/python/_guajacum.py
|
Python
|
lgpl-3.0
| 9,600
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
import traceback
HOOKS = ('args', 'pre_request', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
    '''Dispatches a hook dictionary on a given piece of data.'''
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
try:
hook_data = hook(hook_data) or hook_data
except Exception:
traceback.print_exc()
return hook_data
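# A minimal, self-contained sketch of how dispatch_hook is driven; the hook
# function below is hypothetical and only illustrates the "mutate or pass
# through" contract described in the module docstring:
if __name__ == '__main__':
    def lowercase_response(data):
        # A hook may return a replacement value, or None to keep the original.
        return data.lower()

    hooks = {'response': lowercase_response}
    print(dispatch_hook('response', hooks, 'RAW RESPONSE'))  # -> raw response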
|
DarthMaulware/EquationGroupLeaks
|
Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/ELCO/fosho/requests/hooks.py
|
Python
|
unlicense
| 898
|
# Generated by Django 2.1 on 2018-11-15 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gw_app', '0002_auto_20181101_1545'),
]
operations = [
migrations.AddField(
model_name='nasmodel',
name='enabled',
field=models.BooleanField(default=True, verbose_name='Enabled'),
),
]
|
bashmak/djing
|
gw_app/migrations/0003_nasmodel_enabled.py
|
Python
|
unlicense
| 410
|
from vt_manager.controller.actions.ActionController import ActionController
from vt_manager.controller.drivers.VTDriver import VTDriver
from vt_manager.models.Action import Action
from vt_manager.models.VirtualMachine import VirtualMachine
import xmlrpclib, threading, logging, copy
from vt_manager.communication.utils.XmlHelper import XmlHelper
from vt_manager.models.resourcesHash import resourcesHash
class InformationDispatcher():
@staticmethod
def listResources(remoteHashValue, projectUUID = 'None', sliceUUID ='None'):
logging.debug("Enter listResources")
infoRspec = XmlHelper.getSimpleInformation()
servers = VTDriver.getAllServers()
baseVM = copy.deepcopy(infoRspec.response.information.resources.server[0].virtual_machine[0])
if not servers:
logging.debug("No VTServers available")
infoRspec.response.information.resources.server.pop()
resourcesString = XmlHelper.craftXmlClass(infoRspec)
localHashValue = str(hash(resourcesString))
else:
for sIndex, server in enumerate(servers):
if(sIndex == 0):
baseServer = copy.deepcopy(infoRspec.response.information.resources.server[0])
if(sIndex != 0):
newServer = copy.deepcopy(baseServer)
infoRspec.response.information.resources.server.append(newServer)
InformationDispatcher.__ServerModelToClass(server, infoRspec.response.information.resources.server[sIndex] )
if (projectUUID is not 'None'):
vms = server.getVMs(projectId = projectUUID)
else:
vms = server.getVMs()
if not vms:
logging.debug("No VMs available")
if infoRspec.response.information.resources.server[sIndex].virtual_machine:
infoRspec.response.information.resources.server[sIndex].virtual_machine.pop()
elif (sliceUUID is not 'None'):
vms = vms.filter(sliceId = sliceUUID)
if not vms:
logging.error("No VMs available")
infoRspec.response.information.resources.server[sIndex].virtual_machine.pop()
for vIndex, vm in enumerate(vms):
if (vIndex != 0):
newVM = copy.deepcopy(baseVM)
infoRspec.response.information.resources.server[sIndex].virtual_machine.append(newVM)
InformationDispatcher.__VMmodelToClass(vm, infoRspec.response.information.resources.server[sIndex].virtual_machine[vIndex])
resourcesString = XmlHelper.craftXmlClass(infoRspec)
localHashValue = str(hash(resourcesString))
try:
rHashObject = resourcesHash.objects.get(projectUUID = projectUUID, sliceUUID = sliceUUID)
rHashObject.hashValue = localHashValue
rHashObject.save()
except:
rHashObject = resourcesHash(hashValue = localHashValue, projectUUID= projectUUID, sliceUUID = sliceUUID)
rHashObject.save()
if remoteHashValue == rHashObject.hashValue:
return localHashValue, ''
else:
return localHashValue, resourcesString
@staticmethod
def listVMTemplatesInfo(serverUUID):
#def listVMTemplatesInfo(serverUUID, callbackURL):
logging.debug("Enter listVMTemplatesInfo")
server = VTDriver.getServerByUUID(serverUUID)
xmlrpc_server = xmlrpclib.Server(server.getAgentURL())
templates_info = xmlrpc_server.list_vm_templates(server.getAgentPassword())
#templates_info = xmlrpc_server.list_vm_templates(callbackURL, server.getAgentPassword())
return str(templates_info)
@staticmethod
def forceListActiveVMs(serverID='None', vmID='None'):
if serverID != 'None':
server = VTDriver.getServerById(serverID)
vtam_vms = server.getVMs()
else:
if vmID != 'None':
servers = VTDriver.getAllServers()
vtam_vms = list()
for server in servers:
vtam_vms = server.getVMs(id=int(vmID))
if vtam_vms:
vmID = vtam_vms[0].getUUID()
break
if not vtam_vms:
raise Exception("VM not found")
xmlrpc_server = xmlrpclib.Server(server.getAgentURL())
        # Safely handle the connection to the agent
try:
server_active_vms = xmlrpc_server.force_list_active_vms(server.getAgentPassword(), vmID)
for vm in vtam_vms:
if vm.getUUID() in server_active_vms.keys():
vm.setState("running")
vm.save()
else:
# XXX: avoiding "on queue" and "unknown" states to avoid bad management
#if vm.getState() in ['deleting...', 'failed', 'on queue', 'unknown']:
if vm.getState() in ["deleting...", "failed"]:
child = vm.getChildObject()
server = vm.Server.get()
#Action.objects.all().filter(objectUUID = vm.uuid).delete()
server.deleteVM(vm)
# Keep actions table up-to-date after each deletion
vm_uuids = [ vm.uuid for vm in VirtualMachine.objects.all() ]
Action.objects.all().exclude(objectUUID__in = vm_uuids).delete()
elif vm.getState() in ["running", "starting...", "stopping..."] :
vm.setState("stopped")
vm.save()
else:
continue
except:
server_active_vms = dict()
return server_active_vms
@staticmethod
def __ServerModelToClass(sModel, sClass ):
sClass.name = sModel.getName()
#XXX: CHECK THIS
sClass.id = sModel.id
sClass.uuid = sModel.getUUID()
sClass.operating_system_type = sModel.getOSType()
sClass.operating_system_distribution = sModel.getOSDistribution()
sClass.operating_system_version = sModel.getOSVersion()
sClass.virtualization_type = sModel.getVirtTech()
ifaces = sModel.getNetworkInterfaces()
for ifaceIndex, iface in enumerate(ifaces):
if ifaceIndex != 0:
newInterface = copy.deepcopy(sClass.interfaces.interface[0])
sClass.interfaces.interface.append(newInterface)
if iface.isMgmt:
sClass.interfaces.interface[ifaceIndex].ismgmt = True
else:
sClass.interfaces.interface[ifaceIndex].ismgmt = False
sClass.interfaces.interface[ifaceIndex].name = iface.name
sClass.interfaces.interface[ifaceIndex].switch_id= iface.switchID
sClass.interfaces.interface[ifaceIndex].switch_port = iface.port
@staticmethod
def __VMmodelToClass(VMmodel, VMxmlClass):
VMxmlClass.name = VMmodel.getName()
VMxmlClass.uuid = VMmodel.getUUID()
VMxmlClass.status = VMmodel.getState()
VMxmlClass.project_id = VMmodel.getProjectId()
VMxmlClass.slice_id = VMmodel.getSliceId()
VMxmlClass.project_name = VMmodel.getProjectName()
VMxmlClass.slice_name = VMmodel.getSliceName()
VMxmlClass.operating_system_type = VMmodel.getOSType()
VMxmlClass.operating_system_version = VMmodel.getOSVersion()
VMxmlClass.operating_system_distribution = VMmodel.getOSDistribution()
VMxmlClass.virtualization_type = VMmodel.Server.get().getVirtTech()
VMxmlClass.server_id = VMmodel.Server.get().getUUID()
VMxmlClass.xen_configuration.hd_setup_type = VMmodel.getHdSetupType()
VMxmlClass.xen_configuration.hd_origin_path = VMmodel.getHdOriginPath()
VMxmlClass.xen_configuration.virtualization_setup_type = VMmodel.getVirtualizationSetupType()
VMxmlClass.xen_configuration.memory_mb = VMmodel.getMemory()
ActionController.PopulateNetworkingParams(VMxmlClass.xen_configuration.interfaces.interface, VMmodel)
|
dana-i2cat/felix
|
vt_manager/src/python/vt_manager/controller/dispatchers/xmlrpc/InformationDispatcher.py
|
Python
|
apache-2.0
| 7,769
|
import numbers
from six import string_types
from pypif.obj.common.pio import Pio
from pypif.obj.common.scalar import Scalar
from pypif.obj.common.file_reference import FileReference
class Value(Pio):
"""
Information about a scalar, vector, or matrix, or a list of one of those.
"""
def __init__(self, name=None, scalars=None, vectors=None, matrices=None, files=None,
units=None, tags=None, **kwargs):
"""
Constructor.
:param name: String with the name of the value.
:param scalars: One or more dictionaries, strings, numbers, or :class:`.Scalar` objects.
:param vectors: One or more lists of dictionaries, strings, numbers, or :class:`.Scalar` objects,
each representing a vector.
:param matrices: One of more lists of lists of dictionaries, strings, numbers, or :class:`.Scalar` objects,
each representing a matrix with rows as the innermost lists.
:param files: One of more dictionaries, strings, or :class:`.FileReference` objects.
:param units: String with the units of the value.
:param tags: List of strings or numbers that are tags for this object.
:param kwargs: Dictionary of fields that are not supported.
"""
super(Value, self).__init__(tags=tags, **kwargs)
self._name = None
self.name = name
self._files = None
self.files = files
self._scalars = None
if scalars is not None:
self.scalars = scalars
self._vectors = None
if vectors is not None:
self.vectors = vectors
self._matrices = None
if matrices is not None:
self.matrices = matrices
self._units = None
self.units = units
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._validate_type('name', name, string_types)
self._name = name
@name.deleter
def name(self):
self._name = None
@property
def scalars(self):
return self._scalars
@scalars.setter
def scalars(self, scalars):
self._validate_list_type('scalars', scalars, dict, string_types, numbers.Number, Scalar)
self._scalars = self._get_object(Scalar, scalars)
self._scalars = [Scalar.normalize(x)
for x in (self._scalars if isinstance(self._scalars, list) else [self._scalars])]
@scalars.deleter
def scalars(self):
self._scalars = None
@property
def vectors(self):
return self._vectors
@vectors.setter
def vectors(self, vectors):
self._validate_nested_list_type('vectors', vectors, 2, dict, string_types, numbers.Number, Scalar)
self._vectors = self._get_object(Scalar, vectors)
self._vectors = [list(map(Scalar.normalize, x))
for x in (self._vectors if isinstance(self._vectors[0], list) else [self._vectors])]
@vectors.deleter
def vectors(self):
self._vectors = None
@property
def matrices(self):
return self._matrices
@matrices.setter
def matrices(self, matrices):
self._validate_nested_list_type('matrices', matrices, 3, dict, string_types, numbers.Number, Scalar)
self._matrices = self._get_object(Scalar, matrices)
self._matrices = [list(map(lambda z: list(map(Scalar.normalize, z)), x))
for x in (self._matrices if isinstance(self._matrices[0][0], list) else [self._matrices])]
@matrices.deleter
def matrices(self):
self._matrices = None
@property
def units(self):
return self._units
@units.setter
def units(self, units):
self._validate_type('units', units, string_types)
self._units = units
@units.deleter
def units(self):
self._units = None
@property
def files(self):
return self._files
@files.setter
def files(self, files):
self._validate_list_type('files', files, dict, FileReference)
self._files = files
@files.deleter
def files(self):
self._files = None
def normalize(self):
if self.scalars is not None:
self.scalars = self.scalars
if self.vectors is not None:
self.vectors = self.vectors
if self.matrices is not None:
self.matrices = self.matrices
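# Illustrative usage sketch (not part of the original module; the property names
# follow the constructor documented above, the units and numbers are made up):
if __name__ == '__main__':
    band_gap = Value(name='Band gap', scalars=[1.1, 1.2], units='eV')
    print('{}: {} {}'.format(band_gap.name, band_gap.scalars, band_gap.units))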
|
maxhutch/pypif
|
pypif/obj/common/value.py
|
Python
|
apache-2.0
| 4,437
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
# Import Salt Libs
from salt import acl
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ClientACLTestCase(TestCase):
'''
Unit tests for salt.acl.ClientACL
'''
def setUp(self):
self.blacklist = {
'users': ['joker', 'penguin'],
'modules': ['cmd.run', 'test.fib'],
}
def test_user_is_blacklisted(self):
'''
test user_is_blacklisted
'''
client_acl = acl.PublisherACL(self.blacklist)
self.assertTrue(client_acl.user_is_blacklisted('joker'))
self.assertTrue(client_acl.user_is_blacklisted('penguin'))
self.assertFalse(client_acl.user_is_blacklisted('batman'))
self.assertFalse(client_acl.user_is_blacklisted('robin'))
def test_cmd_is_blacklisted(self):
'''
test cmd_is_blacklisted
'''
client_acl = acl.PublisherACL(self.blacklist)
self.assertTrue(client_acl.cmd_is_blacklisted('cmd.run'))
self.assertTrue(client_acl.cmd_is_blacklisted('test.fib'))
self.assertFalse(client_acl.cmd_is_blacklisted('cmd.shell'))
self.assertFalse(client_acl.cmd_is_blacklisted('test.versions'))
self.assertTrue(client_acl.cmd_is_blacklisted(['cmd.run', 'state.sls']))
self.assertFalse(client_acl.cmd_is_blacklisted(['state.highstate', 'state.sls']))
if __name__ == '__main__':
from integration import run_tests
run_tests(ClientACLTestCase, needs_daemon=False)
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/tests/unit/acl/client_test.py
|
Python
|
apache-2.0
| 1,751
|
# Monary - Copyright 2011-2013 David J. C. Beach
# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.
from time import time
class profile(object):
def __init__(self, name):
self._name = name
def __enter__(self):
self._start = time()
def __exit__(self, *args):
stop = time()
print "%s took %6.2f s" % (self._name, stop - self._start)
|
ksuarz/mongo-monary-driver
|
timing/profile.py
|
Python
|
apache-2.0
| 429
|
import pytest
from indy import IndyError
from indy import did
from indy import wallet
from indy.error import ErrorCode
@pytest.mark.asyncio
@pytest.mark.parametrize("wallet_handle_cleanup", [False])
async def test_import_wallet_works(wallet_handle, wallet_config, credentials, export_config):
(_did, _verkey) = await did.create_and_store_my_did(wallet_handle, "{}")
await did.set_did_metadata(wallet_handle, _did, "metadata")
did_with_meta_before = await did.get_my_did_with_meta(wallet_handle, _did)
await wallet.export_wallet(wallet_handle, export_config)
await wallet.close_wallet(wallet_handle)
await wallet.delete_wallet(wallet_config, credentials)
await wallet.import_wallet(wallet_config, credentials, export_config)
wallet_handle = await wallet.open_wallet(wallet_config, credentials)
did_with_meta_after = await did.get_my_did_with_meta(wallet_handle, _did)
assert did_with_meta_before == did_with_meta_after
await wallet.close_wallet(wallet_handle)
@pytest.mark.asyncio
async def test_import_wallet_works_for_not_exit_path(wallet_config, credentials, export_config):
with pytest.raises(IndyError) as e:
await wallet.import_wallet(wallet_config, credentials, export_config)
assert ErrorCode.CommonIOError == e.value.error_code
|
srottem/indy-sdk
|
wrappers/python/tests/wallet/test_import_wallet.py
|
Python
|
apache-2.0
| 1,307
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
    column: the feature column object.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
if args.hashed:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
return embedding_ops.hashed_embedding_lookup_sparse(
embeddings, input_tensor, args.dimension,
combiner=args.combiner, name='lookup')
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
'SHARED_EMBEDDING_COLLECTION_' + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError('Collection %s can only contain one '
'(partitioned) variable.'
% shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError('The embedding variable with name {} already '
'exists, but its shape does not match required '
'embedding shape here. Please make sure to use '
'different shared_embedding_name for different '
'shared embeddings.'.format(
args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(embeddings, variables.Variable):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(
column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + 'weights')
def _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank,
default_name):
"""Implementation of `input_from(_sequence)_feature_columns`."""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(scope,
default_name=default_name,
values=columns_to_tensors.values()):
output_tensors = []
transformer = _Transformer(columns_to_tensors)
if weight_collections:
weight_collections = list(set(list(weight_collections) +
[ops.GraphKeys.GLOBAL_VARIABLES]))
for column in sorted(set(feature_columns), key=lambda x: x.key):
with variable_scope.variable_scope(None,
default_name=column.name,
values=columns_to_tensors.values()):
transformed_tensor = transformer.transform(column)
try:
# pylint: disable=protected-access
arguments = column._deep_embedding_lookup_arguments(
transformed_tensor)
output_tensors.append(_embeddings_from_arguments(
column,
arguments,
weight_collections,
trainable,
output_rank=output_rank))
except NotImplementedError as ee:
try:
# pylint: disable=protected-access
output_tensors.append(column._to_dnn_input_layer(
transformed_tensor,
weight_collections,
trainable,
output_rank=output_rank))
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
return array_ops.concat(output_rank - 1, output_tensors)
def input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style input layer builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
At the first layer of the model, this column oriented data should be converted
to a single tensor. Each feature column needs a different kind of operation
  during this conversion. For example, sparse features need totally different
  handling than continuous features.
An example usage of input_from_feature_columns is as follows:
# Building model for training
columns_to_tensor = tf.parse_example(...)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns)
second_layer = fully_connected(first_layer, ...)
...
where feature_columns can be defined as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns=[occupation_emb, age_buckets]
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
return _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=2,
default_name='input_from_feature_columns')
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""Builds inputs for sequence models from `FeatureColumn`s.
  See documentation for `input_from_feature_columns`. The following types of
  `FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`,
  `_EmbeddingColumn`, `_RealValuedColumn`. In addition, columns in
  `feature_columns` may not be constructed using any of the following:
  `_HashedEmbeddingColumn`, `_BucketizedColumn`, `_CrossedColumn`.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived from FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
_check_supported_sequence_columns(feature_columns)
_check_forbidden_sequence_columns(feature_columns)
return _input_from_feature_columns(
columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=3,
default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates variables and returns predictions for linear weights in a model.
Args:
column: the column we're working on.
columns_to_tensors: a map from column name to tensors.
embedding_lookup_arguments: arguments for embedding lookup.
num_outputs: how many outputs.
trainable: whether the variable we create is trainable.
weight_collections: weights will be placed here.
Returns:
variables: the created embeddings.
predictions: the computed predictions.
"""
with variable_scope.variable_scope(
None, default_name=column.name, values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[embedding_lookup_arguments.vocab_size, num_outputs],
dtype=dtypes.float32,
initializer=embedding_lookup_arguments.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
embedding_lookup_arguments.input_tensor,
sparse_weights=embedding_lookup_arguments.weight_tensor,
combiner=embedding_lookup_arguments.combiner,
name=column.name + '_weights')
return variable, predictions
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def _create_joint_embedding_lookup(columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates an embedding lookup for all columns sharing a single weight."""
for arg in embedding_lookup_arguments:
assert arg.weight_tensor is None, (
'Joint sums for weighted sparse columns are not supported. '
'Please use weighted_sum_from_feature_columns instead.')
assert arg.combiner == 'sum', (
'Combiners other than sum are not supported for joint sums. '
'Please use weighted_sum_from_feature_columns instead.')
assert len(embedding_lookup_arguments) >= 1, (
'At least one column must be in the model.')
prev_size = 0
sparse_tensors = []
for a in embedding_lookup_arguments:
t = a.input_tensor
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
sparse_tensor_py.SparseTensor(t.indices,
values,
t.shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
with variable_scope.variable_scope(
None, default_name='linear_weights', values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[prev_size, num_outputs],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
sparse_tensor,
sparse_weights=None,
combiner='sum',
name='_weights')
return variable, predictions
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A restricted linear prediction builder based on FeatureColumns.
As long as all feature columns are unweighted sparse columns this computes the
prediction of a linear model which stores all weights in a single variable.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A list of Variables storing the weights.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='joint_weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
transformer = _Transformer(columns_to_tensors)
embedding_lookup_arguments = []
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments.append(
column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access
except NotImplementedError:
raise NotImplementedError('Real-valued columns are not supported. '
'Use weighted_sum_from_feature_columns '
'instead, or bucketize these columns.')
variable, predictions_no_bias = _create_joint_embedding_lookup(
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style linear prediction builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
Example:
```
# Building model for training
feature_columns = (
real_valued_column("my_feature1"),
...
)
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
```
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple containing:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
output_tensors = []
column_to_variable = dict()
transformer = _Transformer(columns_to_tensors)
# pylint: disable=protected-access
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
transformed_tensor)
variable, predictions = _create_embedding_lookup(
column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
except NotImplementedError:
with variable_scope.variable_scope(
None,
default_name=column.name,
values=columns_to_tensors.values()):
tensor = column._to_dense_tensor(transformed_tensor)
tensor = fc._reshape_real_valued_tensor(tensor, 2, column.name)
variable = [contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=init_ops.zeros_initializer,
collections=weight_collections)]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:
raise ValueError('Error creating weighted sum for column: {}.\n'
'{}'.format(column.name, ee))
output_tensors.append(predictions)
column_to_variable[column] = variable
_log_variable(variable)
_maybe_restore_from_checkpoint(column._checkpoint_path(), variable)
# pylint: enable=protected-access
predictions_no_bias = math_ops.add_n(output_tensors)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
feature_columns,
name=None,
example_names=None):
"""Parses tf.Examples to extract tensors for given feature_columns.
This is a wrapper of 'tf.parse_example'.
Example:
```python
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
# Where my_features are:
# Define features and transformations
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature, boundaries=[...])
my_features = [embedding_feature_b, real_feature_buckets, embedding_feature_a]
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
serialized=serialized,
features=fc.create_feature_spec_for_parsing(feature_columns),
name=name,
example_names=example_names)
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
return columns_to_tensors
def parse_feature_columns_from_sequence_examples(
serialized,
context_feature_columns,
sequence_feature_columns,
name=None,
example_name=None):
"""Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.
Args:
serialized: A scalar (0-D Tensor) of type string, a single serialized
`SequenceExample` proto.
context_feature_columns: An iterable containing the feature columns for
context features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
sequence_feature_columns: An iterable containing the feature columns for
sequence features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
name: A name for this operation (optional).
example_name: A scalar (0-D Tensor) of type string (optional), the names of
the serialized proto.
Returns:
A tuple consisting of:
context_features: a dict mapping `FeatureColumns` from
`context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
sequence_features: a dict mapping `FeatureColumns` from
`sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
serialized = array_ops.reshape(serialized, [])
except ValueError as e:
raise ValueError(
        'serialized must contain a single sequence example. Batching must be '
'done after parsing for sequence examples. Error: {}'.format(e))
if context_feature_columns is None:
context_feature_columns = []
if sequence_feature_columns is None:
sequence_feature_columns = []
check_feature_columns(context_feature_columns)
context_feature_spec = fc.create_feature_spec_for_parsing(
context_feature_columns)
check_feature_columns(sequence_feature_columns)
sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access
sequence_feature_columns, allow_missing_by_default=False)
return parsing_ops.parse_single_sequence_example(serialized,
context_feature_spec,
sequence_feature_spec,
example_name,
name)
def _log_variable(variable):
if isinstance(variable, list):
for var in variable:
      if isinstance(var, variables.Variable):
logging.info('Created variable %s, with device=%s', var.name,
var.device)
elif isinstance(variable, variables.Variable):
logging.info('Created variable %s, with device=%s', variable.name,
variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
if isinstance(tensor, sparse_tensor_py.SparseTensor):
raise ValueError(
'SparseTensor is not supported for auto detection. Please define '
'corresponding FeatureColumn for tensor {} {}.', name, tensor)
if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
raise ValueError(
'Non integer or non floating types are not supported for auto detection'
'. Please define corresponding FeatureColumn for tensor {} {}.', name,
tensor)
shape = tensor.get_shape().as_list()
dimension = 1
for i in range(1, len(shape)):
dimension *= shape[i]
return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
if not isinstance(features, dict):
return [_infer_real_valued_column_for_tensor('', features)]
feature_columns = []
for key, value in features.items():
feature_columns.append(_infer_real_valued_column_for_tensor(key, value))
return feature_columns
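# Illustrative sketch (assumption, not part of the original module): for a dict
# of dense float tensors, infer_real_valued_columns yields one real_valued_column
# per key, with dimension equal to the product of the non-batch dimensions, e.g.
#   features = {'age': a float32 Tensor of shape [None, 1],
#               'pixels': a float32 Tensor of shape [None, 8, 8]}
#   infer_real_valued_columns(features)
#   # -> columns equivalent to real_valued_column('age', dimension=1) and
#   #    real_valued_column('pixels', dimension=64)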
def check_feature_columns(feature_columns):
"""Checks the validity of the set of FeatureColumns.
Args:
feature_columns: A set of instances or subclasses of FeatureColumn.
Raises:
ValueError: If there are duplicate feature column keys.
"""
seen_keys = set()
for f in feature_columns:
key = f.key
if key in seen_keys:
raise ValueError('Duplicate feature column key found for column: {}. '
'This usually means that the column is almost identical '
'to another column, and one must be discarded.'.format(
f.name))
seen_keys.add(key)
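# Illustrative note (assumption, not part of the original module): passing two
# feature columns that share a key, e.g. fc.real_valued_column('age') listed
# twice, makes check_feature_columns raise
#   ValueError: Duplicate feature column key found for column: age. ...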
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
  Some features may be used in more than one place. For example, one can use a
bucketized feature by itself and a cross with it. In that case Transformer
should create only one bucketization op instead of multiple ops for each
feature column. To handle re-use of transformed columns, Transformer keeps all
previously transformed columns.
An example usage of Transformer is as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
hash_bucket_size=10000)
columns_to_tensor = tf.parse_example(...)
transformer = Transformer(columns_to_tensor)
occupation_x_age_tensor = transformer.transform(occupation_x_age)
occupation_tensor = transformer.transform(occupation)
age_buckets_tensor = transformer.transform(age_buckets)
"""
def __init__(self, columns_to_tensors):
"""Initializes transfomer.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have FeatureColumn as
a key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
Transformed features are inserted in columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors
def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.debug('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
def _add_variable_collection(weight_collections):
if weight_collections:
weight_collections = list(
set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
return weight_collections
# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
fc._EmbeddingColumn,
fc._RealValuedColumn)
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._HashedEmbeddingColumn,
fc._BucketizedColumn,
fc._CrossedColumn)
def _check_supported_sequence_columns(feature_columns):
"""Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
for col in feature_columns:
if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError(
'FeatureColumn type {} is not currently supported for sequence data.'.
format(type(col).__name__))
def _get_parent_columns(feature_column):
"""Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
if isinstance(feature_column, (fc._WeightedSparseColumn,
fc._OneHotColumn,
fc._EmbeddingColumn,)):
return (feature_column.sparse_id_column,)
if isinstance(feature_column, (fc._BucketizedColumn,)):
return (feature_column.source_column,)
if isinstance(feature_column, (fc._CrossedColumn)):
return tuple(feature_column.columns)
return tuple()
def _gather_feature_columns(feature_columns):
"""Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
gathered = list(feature_columns)
i = 0
while i < len(gathered):
for column in _get_parent_columns(gathered[i]):
if column not in gathered:
gathered.append(column)
i += 1
return gathered
def _check_forbidden_sequence_columns(feature_columns):
"""Recursively cecks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
all_feature_columns = _gather_feature_columns(feature_columns)
for feature_column in all_feature_columns:
if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS):
raise ValueError(
'Column {} is of type {}, which is not currently supported for '
'sequences.'.format(feature_column.name,
type(feature_column).__name__))
|
jeffzheng1/tensorflow
|
tensorflow/contrib/layers/python/layers/feature_column_ops.py
|
Python
|
apache-2.0
| 36,030
|
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import str
import h5py
import numpy as np
def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for data captured per minibatch.
Arguments:
minibatches (int): how many total minibatches
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((minibatches,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
x[last_e:e] = e_idx + (np.arange(float(e_minibatches)) / e_minibatches)
last_e = e
else:
x = np.arange(minibatches)
return x
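# Illustrative example (assumption, not part of the original module): for two
# epochs containing 2 and 3 minibatches respectively,
#   create_minibatch_x(5, [2, 5], epoch_axis=True)
# yields [0.0, 0.5, 1.0, 1.333..., 1.666...], i.e. the epoch index plus the
# fractional progress within that epoch; with epoch_axis=False it is simply
# [0, 1, 2, 3, 4].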
def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for points captured per epoch.
Arguments:
points (int): how many data points need a corresponding x axis points
epoch_freq (int): are points once an epoch or once every n epochs?
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((points,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
if (e_idx + 1) % epoch_freq == 0:
x[e_idx // epoch_freq] = e_idx + ((e_minibatches - 1) // e_minibatches)
last_e = e
else:
x = minibatch_markers[(epoch_freq - 1)::epoch_freq] - 1
return x
def h5_cost_data(filename, epoch_axis=True):
"""
Read cost data from hdf5 file. Generate x axis data for each cost line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, x data, y data)
"""
ret = list()
with h5py.File(filename, "r") as f:
config, cost, time_markers = [f[x] for x in ['config', 'cost', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
minibatch_markers = time_markers['minibatch']
for name, ydata in cost.items():
y = ydata[...]
if ydata.attrs['time_markers'] == 'epoch_freq':
y_epoch_freq = ydata.attrs['epoch_freq']
assert len(y) == total_epochs // y_epoch_freq
x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
elif ydata.attrs['time_markers'] == 'minibatch':
assert len(y) == total_minibatches
x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
else:
raise TypeError('Unsupported data format for h5_cost_data')
ret.append((name, x, y))
return ret
def h5_hist_data(filename, epoch_axis=True):
"""
Read histogram data from hdf5 file. Generate x axis data for each hist line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, data, dh, dw, bins, offset)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'hist' in f:
hists, config = [f[x] for x in ['hist', 'config']]
bins, offset, time_markers = [hists.attrs[x]
for x in ['bins', 'offset', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
for hname, hdata in hists.items():
dw = total_epochs if (time_markers == 'epoch_freq') else total_minibatches
dh = bins
ret.append((hname, hdata[...], dh, dw, bins, offset))
return ret
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
        img_data (ndarray, shape: [N, M, 3], dtype: uint8): image data
        downsample (int): factor by which rows and columns are downsampled
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * (C + 1)).view(np.uint32)
return final_image
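# Illustrative shapes (assumption, not part of the original module): a 100x200
# RGB uint8 image with downsample=2 becomes a (50, 100) uint32 array, where each
# element packs the R, G, B bytes of one pixel together with alpha=255.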
def h5_deconv_data(filename):
"""
Read deconv visualization data from hdf5 file.
Arguments:
filename (str): Filename with hdf5 deconv data
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'deconv' not in list(f.keys()):
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in list(act_data.keys()):
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
|
Jokeren/neon
|
neon/visualizations/data.py
|
Python
|
apache-2.0
| 7,474
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
event = sqlalchemy.Table('event', meta, autoload=True)
event.c.logical_resource_id.alter(name='resource_name')
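# Roughly equivalent DDL for reference (illustrative; the exact statement depends
# on the backend bound to migrate_engine):
#   ALTER TABLE event RENAME COLUMN logical_resource_id TO resource_name;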
|
rdo-management/heat
|
heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py
|
Python
|
apache-2.0
| 807
|
import logging
import StringIO
from iso8601 import parse_date
from datetime import datetime
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.model.base_model import appConfig
from lr.lib.base import BaseController, render
import json
import ijson
import collections, sys
import math
from urllib2 import urlopen,HTTPError
import lr.lib.helpers as h
log = logging.getLogger(__name__)
import couchdb
class ExtractController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('extract', 'extract')
def _getView(self,view='_all_docs',keys=[], includeDocs=True,startKey=None,endKey=None):
args = {'include_docs':includeDocs}
if len(keys) > 0:
args['keys'] = keys
args['reduce'] = False
args['stale'] = appConfig['couchdb.stale.flag']
if startKey is not None:
args['startkey'] = startKey
if endKey is not None:
args['endkey'] = endKey
db_url = '/'.join([appConfig['couchdb.url'],appConfig['couchdb.db.resourcedata']])
view = h.getResponse(database_url=db_url,view_name=view,**args)
return view
def _convertDateTime(self, dt):
try:
epoch = parse_date("1970-01-01T00:00:00Z")
if isinstance(dt, str) or isinstance(dt,unicode):
dt = parse_date(dt)
dt = dt - epoch
return int(math.floor(dt.total_seconds()))
except:
abort(500,"Invalid Date Format")
def _processRequest(self,startKey, endKey,urlBase, includeDocs=True):
def streamResult(resp):
CHUNK_SIZE=1024
data = resp.read(CHUNK_SIZE)
while len(data) > 0:
yield data
data = resp.read(CHUNK_SIZE)
try:
resp = self._getView(urlBase,startKey=startKey,endKey=endKey,includeDocs=includeDocs)
return streamResult(resp)
except HTTPError as ex:
abort(404, "not found")
def _orderParmaByView(self,params,view):
def makeEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
#if complex key
if isinstance(newkey, list):
# get last element in key
last = newkey[-1]
# if the last element is a list, just append an empty object to the last element's list
if isinstance(last, list):
last.append({})
# if the last element in an object, it becomes a bit tricky
# *** note that the key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# key order won't be guaranteed to be the same as what CouchDB will use!!!!
elif isinstance(last, dict):
lastkey = last.keys()[-1]
# since there's no easy way to increment a float accurately, instead append a new key that 'should' sort after the previous key.
if (isinstance(last[lastkey], float)):
last[lastkey+u'\ud7af'] = None
# if it's something else... this thing should recurse and keep going.
else:
last[lastkey] = makeEndKey(last[lastkey])
# if we got here, it's nothing really special, so we'll just append a {} to newkey
else:
newkey.append({})
            # this branch handles the odd case where we have a string as either the key or the value of an object in a complex key.
elif isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# integer... so just increment 1.
elif isinstance(newkey, int):
newkey += 1
# if we skipped everything else - we don't have a strategy to deal with it... so don't
return newkey
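        # Illustrative examples (assumption, not part of the original code) of how
        # makeEndKey widens a start key into a range end key:
        #   "foo"            -> u"foo\ud7af"
        #   5                -> 6
        #   ["foo", 5]       -> ["foo", 5, {}]
        #   ["foo", [1, 2]]  -> ["foo", [1, 2, {}]]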
def makeStartsWithEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
            # this is the base case for keys that are just strings: append the funky unicode char so that it grabs everything from
# "foo" to "foo\ud7af", which is technically the only way we know how to deal with starts with.
if isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# if this is a complex key, then get the last element and recurse
elif isinstance(newkey, list):
newkey[-1] = makeStartsWithEndKey(newkey[-1])
# if the last element in an object, it becomes a bit tricky, because you must modify the last key, which implies
# order of keys was maintained when the value was originally parsed.
# *** IMPORTANT: The key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# *** key order won't be guaranteed to be the same as what CouchDB will use!!!!
            elif isinstance(newkey, dict):
                lastkey = newkey.keys()[-1]
                #take the value from the last key and recurse.
                newkey[lastkey] = makeEndKey(newkey[lastkey])
# if we skipped everything else - we don't have a strategy to deal with it as a Starts With key, so just return
else:
newkey = key
return newkey
def hasParamFor(funcName):
if funcName == 'ts' and ('from' in params or 'until' in params):
return True
elif funcName == 'discriminator' and ('discriminator' in params or 'discriminator-starts-with' in params):
return True
elif funcName == 'resource' and ('resource' in params or 'resource-starts-with' in params):
return True
else:
return False
def populateTs(startKey, endKey, pos, isLast):
if 'from' in params:
startKey.append(self._convertDateTime(params['from']))
elif pos == 1:
startKey.append(self._convertDateTime(datetime.min.isoformat() + "Z"))
if 'until' in params:
endKey.append(self._convertDateTime(params['until']))
elif pos == 1:
endKey.append(self._convertDateTime(datetime.utcnow().isoformat()+"Z"))
return startKey, endKey
def populateDiscriminator(startKey, endKey, pos, isLast):
if 'discriminator' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator']
startKey.append(discriminator)
endKey.append(discriminator)
elif 'discriminator-starts-with' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator-starts-with'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator-starts-with']
startKey.append(discriminator)
endKey.append(discriminator)
endKey = makeStartsWithEndKey(endKey)
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
def populateResource(startKey, endKey, pos, isLast):
if 'resource' in params:
startKey.append(params['resource'])
endKey.append(params['resource'])
elif 'resource-starts-with' in params:
startKey.append(params['resource-starts-with'])
endKey.append(params['resource-starts-with']+u'\ud7af')
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
startKey=[]
endKey=[]
includeDocs = True
if "ids_only" in params:
includeDocs = not params
funcs = {
"discriminator":populateDiscriminator,
'resource':populateResource,
'ts':populateTs
}
queryOrderParts = view.split('-by-')
aggregate = queryOrderParts[0]
queryParams= queryOrderParts[1].split('-')
# if we don't have explicit params for this, then omit.
if hasParamFor(aggregate):
queryParams.append(aggregate)
log.error("added aggregate")
for pos, q in enumerate(queryParams,start=1):
            startKey, endKey = funcs[q](startKey, endKey, pos, len(queryParams)==pos)
if len(endKey) > 0 and 'resource-starts-with' not in params and 'discriminator-starts-with' not in params:
log.error("making endkey")
endKey = makeEndKey(endKey)
# startkey, endKey = funcs[aggregate](startKey, endKey, len(queryParams)+1, True)
return startKey if len(startKey) > 0 else None, endKey if len(endKey) > 0 else None, includeDocs
def get(self, dataservice="",view='',list=''):
"""GET /extract/id: Show a specific intem"""
try:
db_url = '/'.join([appConfig['couchdb.url'],appConfig['couchdb.db.resourcedata']])
db = couchdb.Database(db_url)
dsDocument = db['_design/'+dataservice]
if "dataservices" not in dsDocument:
abort(406, "Invalid Data Service")
log.error("no dataservices element in document")
urlBase = "_design/{0}/_list/{1}/{2}".format(dataservice,list,view)
startKey, endKey,includeDocs = self._orderParmaByView(request.params,view)
return self._processRequest(startKey,endKey,urlBase,includeDocs)
except couchdb.ResourceNotFound as ex:
abort(406,"Invalid Data Service")
log.error(ex)
|
jimklo/LearningRegistry
|
LR/lr/controllers/extract.py
|
Python
|
apache-2.0
| 10,398
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from http.cookiejar import LWPCookieJar
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir_for
from pants.util.memo import memoized_property
class Cookies(Subsystem):
options_scope = "cookies"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--path",
advanced=True,
fingerprint=True,
default=os.path.join(register.bootstrap.pants_bootstrapdir, "auth", "cookies"),
help="Path to file that stores persistent cookies. "
"Defaults to <pants bootstrap dir>/auth/cookies.",
)
def update(self, cookies):
"""Add specified cookies to our cookie jar, and persists it.
:param cookies: Any iterable that yields http.cookiejar.Cookie instances, such as a CookieJar.
"""
cookie_jar = self.get_cookie_jar()
for cookie in cookies:
cookie_jar.set_cookie(cookie)
with self._lock:
cookie_jar.save()
def get_cookie_jar(self):
"""Returns our cookie jar."""
cookie_file = self._get_cookie_file()
cookie_jar = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
cookie_jar.load()
else:
safe_mkdir_for(cookie_file)
# Save an empty cookie jar so we can change the file perms on it before writing data to it.
with self._lock:
cookie_jar.save()
os.chmod(cookie_file, 0o600)
return cookie_jar
def _get_cookie_file(self):
# We expanduser to make it easy for the user to config the cookies into their homedir.
return os.path.realpath(os.path.expanduser(self.get_options().path))
@memoized_property
def _lock(self):
"""An identity-keyed inter-process lock around the cookie file."""
lockfile = "{}.lock".format(self._get_cookie_file())
safe_mkdir_for(lockfile)
return OwnerPrintingInterProcessFileLock(lockfile)
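# Illustrative usage sketch (assumption, not part of the original module): from
# code that already holds a Cookies subsystem instance, e.g. one obtained through
# the pants options system:
#   jar = cookies_instance.get_cookie_jar()   # loads or creates the jar file
#   cookies_instance.update(extra_cookies)    # merges and persists new cookies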
|
tdyas/pants
|
src/python/pants/auth/cookies.py
|
Python
|
apache-2.0
| 2,261
|
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the arpcache module
"""
import os
import subprocess
import sys
import unittest
import mock
import moduletests.src.arpcache
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class TestArpcache(unittest.TestCase):
config_file_path = "/etc/sysctl.d/55-arp-gc_thresh1.conf"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("subprocess.check_output")
def test_detect_noproblem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 0"
self.assertFalse(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
def test_detect_problem(self, check_output_mock):
check_output_mock.return_value = "net.ipv4.neigh.default.gc_thresh1 = 1"
self.assertTrue(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
"1", "test", "/etc/sysctl.d/55-arp-gc_thresh1.conf: no such file or directory"))
def test_fix_cpe(self, check_output_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(subprocess.CalledProcessError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] 'sysctl -w net.ipv4.neigh.default.gc_thresh1=0' failed for running system\n"))
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="stuff"))
def test_fix_exists_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"something else\n"))
def test_fix_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"net.ipv4.neigh.default.gc_thresh1 = 0\n"))
def test_fix_sudo_true_found_twice(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", side_effect=IOError)
def test_fix_writefail(self, open_mock, exists_mock, check_output_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] Failed to write config to /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
@mock.patch("moduletests.src.arpcache.detect", return_value=False)
def test_run_success(self, detect_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled.\n"))
self.assertTrue(detect_mock.called)
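    # The run() tests stub get_config_dict() so that the REMEDIATE and SUDO flags,
    # plus the presence of a backed-up config file, determine which summary message
    # run() prints and whether fix()/restore() are invoked.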
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
def test_run_no_remediate(self, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": False,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
moduletests.src.arpcache.run()
self.assertTrue("[UNFIXED] Remediation impossible without sudo and --remediate.\n"
"-- Running as root/sudo: True\n"
"-- Required --remediate flag specified: False\n"
"[FAILURE] Aggressive arp caching is enabled."
in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.arpcache.backup", return_value=True)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_failure_isfile(self, restore_mock, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(restore_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", side_effect=(True, False))
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_fix(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled after "
"remediation. Please see the logs for further details\n"))
self.assertTrue(fix_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", side_effect=Exception)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_detect_exception(self, restore_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
self.assertTrue(restore_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict", side_effect=Exception)
def test_run_config_exception(self, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue(config_mock.called)
|
gregbdunn/aws-ec2rescue-linux
|
tools/moduletests/unit/test_arpcache.py
|
Python
|
apache-2.0
| 12,661
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""x509 module tests."""
import array
import types
from google.apputils import app
from google.apputils import basetest
import mox
import stubout
from pyasn1.type import univ
from simian.auth import x509
from simian.auth import tlslite_bridge
class Error(Exception):
"""Base Error."""
class X509ModuleTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testLoadPemGeneric(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = '\n\n\n-----BEGIN-----\nhello\n-----END-----\n\n\n'
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenInfo(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = ('\n\n\n-----BEGIN-----\n'
'Proc-Type: foo\nhello\n-----END-----\n\n\n')
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenSpaces(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = ' \n\n\n-----BEGIN----- \nhello \n-----END----- \n\n\n '
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenSpacesNoLastNewline(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END'
input = ' \n\n\n-----BEGIN----- \nhello \n-----END-----'
expected = [
'-----BEGIN-----',
'hello',
'-----END-----',
]
self.assertEqual(expected, x509.LoadPemGeneric(input, header, footer))
def testLoadPemGenericWhenMissingHeader(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN BLAH'
footer = 'END BLAH'
input = '\n\n\n-----BEGIN-----\nhello\n-----END-----\n\n\n'
self.assertRaises(
x509.HeaderMissingPEMFormatError, x509.LoadPemGeneric,
input, header, footer)
def testLoadPemGenericWhenMissingFooter(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END BLAH'
input = '\n\n\n-----BEGIN-----\nhello\n-----END-----\n\n\n'
self.assertRaises(
x509.FooterMissingPEMFormatError, x509.LoadPemGeneric,
input, header, footer)
def testLoadPemGenericWhenTooFewLines(self):
"""Test LoadPemGeneric()."""
header = 'BEGIN'
footer = 'END BLAH'
input = '\n\n\n-----BEGIN-----\n\n\n\n'
self.assertRaises(
x509.PEMFormatError, x509.LoadPemGeneric, input, header, footer)
def testLoadCertificateFromPEM(self):
"""Test LoadCertificateFromPEM()."""
header = 'BEGIN CERTIFICATE'
footer = 'END CERTIFICATE'
pem_input = 'pem_input'
pem_output = ['---header---', 'base64', '---footer---']
self.mox.StubOutWithMock(x509, 'LoadPemGeneric')
self.mox.StubOutWithMock(x509, 'LoadCertificateFromBase64')
x509.LoadPemGeneric(pem_input, header, footer).AndReturn(pem_output)
x509.LoadCertificateFromBase64('base64').AndReturn('ok')
self.mox.ReplayAll()
self.assertEqual(x509.LoadCertificateFromPEM(pem_input), 'ok')
self.mox.VerifyAll()
def testLoadRSAPrivateKeyFromPEM(self):
"""Test LoadRSAPrivateKeyFromPEM()."""
header = 'BEGIN RSA PRIVATE KEY'
footer = 'END RSA PRIVATE KEY'
pem_input = 'pem_input'
pem_output = ['---header---', 'base64', '---footer---']
self.mox.StubOutWithMock(x509, 'LoadPemGeneric')
self.mox.StubOutWithMock(
x509.tlslite_bridge, 'parsePEMKey')
x509.LoadPemGeneric(pem_input, header, footer).AndReturn(pem_output)
x509.tlslite_bridge.parsePEMKey(
'\n'.join(pem_output)).AndReturn('ok')
self.mox.ReplayAll()
self.assertEqual(x509.LoadRSAPrivateKeyFromPEM(pem_input), 'ok')
self.mox.VerifyAll()
def testLoadRSAPrivateKeyFromPEMWhenSyntaxError(self):
"""Test LoadRSAPrivateKeyFromPEM()."""
header = 'BEGIN RSA PRIVATE KEY'
footer = 'END RSA PRIVATE KEY'
pem_input = 'pem_input'
pem_output = ['---header---', 'base64', '---footer---']
self.mox.StubOutWithMock(x509, 'LoadPemGeneric')
self.mox.StubOutWithMock(
x509.tlslite_bridge, 'parsePEMKey')
x509.LoadPemGeneric(pem_input, header, footer).AndReturn(pem_output)
x509.tlslite_bridge.parsePEMKey(
'\n'.join(pem_output)).AndRaise(SyntaxError)
self.mox.ReplayAll()
self.assertRaises(
x509.RSAPrivateKeyPEMFormatError,
x509.LoadRSAPrivateKeyFromPEM, pem_input)
self.mox.VerifyAll()
def testLoadCertificateFromBase64(self):
"""Test LoadCertificateFromBase64()."""
self.mox.StubOutWithMock(x509.base64, 'b64decode')
self.mox.StubOutWithMock(x509, 'BASE64_RE')
x509.BASE64_RE.search('b64str').AndReturn(True)
x509.base64.b64decode('b64str').AndReturn('binary')
mock_x509 = self.mox.CreateMockAnything()
self.stubs.Set(x509, 'X509Certificate', mock_x509)
mock_x509().AndReturn(mock_x509)
mock_x509.LoadFromByteString('binary').AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
mock_x509,
x509.LoadCertificateFromBase64('b64str'))
self.mox.VerifyAll()
def testLoadCertificateFromBase64WhenBase64CharacterCheckFail(self):
"""Test LoadCertificateFromBase64()."""
self.mox.StubOutWithMock(x509.base64, 'b64decode')
self.mox.StubOutWithMock(x509, 'BASE64_RE')
x509.BASE64_RE.search('b64str').AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
x509.PEMFormatError,
x509.LoadCertificateFromBase64, 'b64str')
self.mox.VerifyAll()
def testLoadCertificateFromBase64WhenBase64DecodeFail(self):
"""Test LoadCertificateFromBase64()."""
self.mox.StubOutWithMock(x509.base64, 'b64decode')
self.mox.StubOutWithMock(x509, 'BASE64_RE')
x509.BASE64_RE.search('b64str').AndReturn(True)
x509.base64.b64decode('b64str').AndRaise(TypeError)
self.mox.ReplayAll()
self.assertRaises(
x509.PEMFormatError,
x509.LoadCertificateFromBase64, 'b64str')
self.mox.VerifyAll()
class BaseDataObjectTest(mox.MoxTestBase):
"""Test BaseDataObject class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.bdo = x509.BaseDataObject()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testGetDataDict(self):
"""Test _GetDataDict()."""
try:
self.bdo._GetDataDict()
self.fail('NotImplementedError not raised')
except NotImplementedError:
pass
def testCreateGetMethod(self):
"""Test CreateGetMethod()."""
mock_dataobj = self.mox.CreateMockAnything()
mock_dataobj._GetDataDict().AndReturn({'foo': 123})
def mock_setattr(cls, key, value):
self.assertEquals(key, 'GetFoo')
self.assertTrue(type(value) is types.FunctionType)
self.assertEqual(123, value(mock_dataobj))
self.mox.ReplayAll()
x509.BaseDataObject.CreateGetMethod('Foo', 'foo', setattr_=mock_setattr)
self.mox.VerifyAll()
class X509CertificateTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.x = x509.X509Certificate()
self._cert_reset = {
'serial_num': None,
'issuer': None,
'subject': None,
'valid_notbefore': None,
'valid_notafter': None,
'fields_data': None,
'sig_data': None,
'sig_algorithm': None,
'entire_cert_data': None,
'public_key': None,
'may_act_as_ca': None,
'key_usage': None,
'subject_alt_name': None,
}
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def _CheckSaneCertFields(self, d):
"""Check that output dict keys are defined in _cert_reset.
Args:
d: dict, output from a _Get*FromSequence method
"""
for k in d:
self.assertTrue(k in self._cert_reset, 'Key %s is invalid in _cert' % k)
def testInit(self):
"""Test __init__()."""
self.mox.StubOutWithMock(x509.X509Certificate, 'Reset')
x509.X509Certificate.Reset().AndReturn(None)
self.mox.ReplayAll()
unused = x509.X509Certificate()
self.mox.VerifyAll()
def testReset(self):
"""Test Reset()."""
self.x.Reset()
self.assertEqual(self.x._cert, self._cert_reset)
def testCreateGetMethods(self):
"""Test the autogenerated methods from CreateGetMethod()."""
names = [
'Issuer',
'Subject',
'DatetimeNotValidBefore',
'DatetimeNotValidAfter',
'FieldsData',
'SignatureData',
'SignatureAlgorithm',
'SerialNumber',
'EntireCertData',
'PublicKey',
'MayActAsCA',
'KeyUsage',
'SubjectAltName',
]
for name in names:
self.assertTrue(
hasattr(self.x, 'Get%s' % name), 'has method Get%s' % name)
self.assertTrue(
type(getattr(self.x, 'Get%s' % name)) is types.MethodType,
'Get%s is a method' % name)
def testGetDataDict(self):
"""Test _GetDataDict()."""
self.assertEqual(self.x._cert, self.x._GetDataDict())
def testCertTimestampToDatetime(self):
"""Test _CertTimestampToDatetime()."""
self.mox.StubOutWithMock(x509.time, 'strptime')
self.mox.StubOutWithMock(x509.datetime, 'datetime', True)
time_ary = (1981, 1, 11, 0, 0, 0, 0, 'bla')
x509.time.strptime('ts', self.x.TIMESTAMP_FMT).AndReturn(time_ary)
x509.datetime.datetime(*time_ary[0:7]).AndReturn('datetime')
self.mox.ReplayAll()
self.assertEqual('datetime', self.x._CertTimestampToDatetime('ts'))
self.mox.VerifyAll()
def testStrToArray(self):
"""Test StrToArray()."""
r = tlslite_bridge.StrToArray('12313')
self.assertEqual(5, len(r))
self.assertTrue(isinstance(r, bytearray) or isinstance(r, array.array))
def testCertTimestampToDatetimeWhenBadTimestamp(self):
"""Test _CertTimestampToDatetime()."""
self.mox.StubOutWithMock(x509.time, 'strptime')
x509.time.strptime('ts', self.x.TIMESTAMP_FMT).AndRaise(ValueError)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateValueError,
self.x._CertTimestampToDatetime, 'ts')
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDKeyUsage(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
e_key_usage = univ.OctetString('\x03e_key_usage')
d_key_usage = ((1, 0, 1),)
x509.der_decoder.decode(e_key_usage).AndReturn(d_key_usage)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_KEY_USAGE, e_key_usage),
)
output = {
'key_usage': (
x509.X509V3_KEY_USAGE_BIT_FIELDS[0],
x509.X509V3_KEY_USAGE_BIT_FIELDS[2],
),
}
self.mox.ReplayAll()
self.assertEqual(
output,
self.x._GetV3ExtensionFieldsFromSequence(seq))
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDKeyUsageBadParse(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
e_key_usage = univ.OctetString('e_key_usage')
d_key_usage = ((1, 0, 1),)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_KEY_USAGE, e_key_usage),
)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetV3ExtensionFieldsFromSequence,
seq)
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDBasicConstraint(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
e_basic_const = univ.OctetString('e_basic_const')
d_basic_const = ((True,), '')
x509.der_decoder.decode(e_basic_const).AndReturn(d_basic_const)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_BASIC_CONSTRAINTS, e_basic_const),
)
output = {
'may_act_as_ca': True,
}
self.mox.ReplayAll()
self.assertEqual(
output,
self.x._GetV3ExtensionFieldsFromSequence(seq))
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDBasicConstraintForm2(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
e_basic_const = univ.OctetString('e_basic_const')
d_basic_const = ((True,), '')
x509.der_decoder.decode(e_basic_const).AndReturn(d_basic_const)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_BASIC_CONSTRAINTS, True, e_basic_const),
)
output = {
'may_act_as_ca': True,
}
self.mox.ReplayAll()
self.assertEqual(
output,
self.x._GetV3ExtensionFieldsFromSequence(seq))
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDBasicConstraintBadForm(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
e_basic_const = univ.OctetString('e_basic_const')
d_basic_const = ((True,), '')
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_BASIC_CONSTRAINTS, True, e_basic_const, 'what', 'ugh'),
)
output = {
'may_act_as_ca': True,
}
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetV3ExtensionFieldsFromSequence,
seq)
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDBasicConstraintPaths(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
e_basic_const = univ.OctetString('e_basic_const')
d_basic_const = ((True,), ['unsupported path data'])
x509.der_decoder.decode(e_basic_const).AndReturn(d_basic_const)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_BASIC_CONSTRAINTS, e_basic_const),
)
output = {
'may_act_as_ca': True,
}
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetV3ExtensionFieldsFromSequence,
seq)
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDSubjectAltName(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
e_mspn = univ.OctetString('\x30mspn der encoded')
d_mspn = (
(x509.OID_MS_NT_PRINCIPAL_NAME, 'foo'),
)
x509.der_decoder.decode(e_mspn).AndReturn(d_mspn)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_SUBJECT_ALT_NAME, e_mspn),
)
output = {
'subject_alt_name': 'X_MS_NT_Principal_Name=foo',
}
self.mox.ReplayAll()
self.assertEqual(
output,
self.x._GetV3ExtensionFieldsFromSequence(seq))
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDSubjectAltNameBadForm(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
e_mspn = univ.OctetString('mspn der encoded wrong encapsulation')
d_mspn = (
(x509.OID_MS_NT_PRINCIPAL_NAME, 'foo'),
)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_SUBJECT_ALT_NAME, e_mspn),
)
output = {
'subject_alt_name': 'X_MS_NT_Principal_Name=foo',
}
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetV3ExtensionFieldsFromSequence,
seq)
self.mox.VerifyAll()
def testGetV3ExtensionFieldsFromSequenceWhenOIDSubjectAltNameUnknownOID(self):
"""Test _GetV3ExtensionFieldsFromSequence()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
unknown_oid = (1, 2, 3)
e_mspn = univ.OctetString('\x30mspn der encoded')
d_mspn = (
(unknown_oid, 'foo'),
)
x509.der_decoder.decode(e_mspn).AndReturn(d_mspn)
seq = (
('junk', ('value', 'value')),
(x509.OID_X509V3_SUBJECT_ALT_NAME, e_mspn),
)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetV3ExtensionFieldsFromSequence,
seq)
self.mox.VerifyAll()
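  # The _AttributeValueToString tests below cover LDAP DN-style escaping of
  # attribute values: leading '#' or space, trailing space, embedded NUL bytes,
  # and the characters " + , ; < > \ must each be backslash-escaped.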
def testAttributeValueToString(self):
"""Test _AttributeValueToString()."""
value = 'newyork'
expected = 'newyork'
self.assertEqual(value, expected)
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenLeadingBadCharsSpace(self):
"""Test _AttributeValueToString()."""
value = ' new york'
expected = '\\ new york'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenLeadingBadCharsHash(self):
"""Test _AttributeValueToString()."""
value = '#new york'
expected = '\\#new york'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenTrailingBadCharsSpace(self):
"""Test _AttributeValueToString()."""
value = 'new york '
expected = 'new york\\ '
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenContainsNull(self):
"""Test _AttributeValueToString()."""
value = 'new%syork' % chr(00)
expected = 'new\\00york'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringPreventIndexRegression(self):
"""Test _AttributeValueToString()."""
value = ',newyork'
expected = '\\,newyork'
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenCharsNeedingEscaping(self):
"""Test _AttributeValueToString()."""
chars = ['"', '+', ',', ';', '<', '>', '\\']
for c in chars:
value = 'new%syork' % c
expected = 'new\\%syork' % c
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAttributeValueToStringWhenMultipleAdjacentTransformsNeeded(self):
"""Test _AttributeValueToString()."""
value = ' new,york;; '
expected = '\\ new\\,york\\;\\;\\ '
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
value = '#new,york;\x00, '
expected = '\\#new\\,york\\;\\00\\,\\ '
result = self.x._AttributeValueToString(value)
self.assertEqual(expected, result)
def testAssembleDNSequence(self):
"""Test _AssembleDNSequence()."""
value = (
((x509.OID_ID['CN'], 'foo'),),
((x509.OID_ID['OU'], 'bar'),),
)
self.mox.StubOutWithMock(self.x, '_AttributeValueToString')
self.x._AttributeValueToString('foo').AndReturn('foo')
self.x._AttributeValueToString('bar').AndReturn('bar')
self.mox.ReplayAll()
self.assertEqual(self.x._AssembleDNSequence(value), 'CN=foo,OU=bar')
self.mox.VerifyAll()
def testAssembleDNSequenceWhenUnknownOID(self):
"""Test _AssembleDNSequence()."""
bad_oid = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
value = (
((bad_oid, 'foo'),),
((x509.OID_ID['OU'], 'bar'),),
)
self.assertRaises(
x509.CertificateParseError,
self.x._AssembleDNSequence,
value)
def testAssembleDNSequenceWhenBadStructure(self):
"""Test _AssembleDNSequence()."""
value = (
(x509.OID_ID['CN'], 'foo'), # bad structure
((x509.OID_ID['OU'], 'bar'),),
)
self.assertRaises(
x509.CertificateParseError,
self.x._AssembleDNSequence,
value)
def testGetFieldsFromSequence(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a','b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
mock_utctime().AndReturn(mock_utctime)
mock_utctime().AndReturn(mock_utctime)
before_ts.isSameTypeWith(mock_utctime).AndReturn(True)
after_ts.isSameTypeWith(mock_utctime).AndReturn(True)
serial_num = 12345
v3ext = {
'may_act_as_ca': 123,
'key_usage': (1, 2, 3),
'subject_alt_name': 'subj alt name',
}
seq = (
x509.X509_CERT_VERSION_3,
serial_num,
sig_alg_seq,
(((x509.OID_ID['CN'], 'issuer'),),),
(before_ts, after_ts),
(((x509.OID_ID['CN'], 'subject'),),),
'public key',
'x509v3 extensions',
)
seq_encoded = 'raw bytes'
before_dt = 'before_dt'
after_dt = 'after_dt'
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_CertTimestampToDatetime')
self.mox.StubOutWithMock(self.x, '_GetV3ExtensionFieldsFromSequence')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.mox.StubOutWithMock(x509.der_encoder, 'encode', True)
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq[3]).AndReturn('CN=issuer')
self.x._CertTimestampToDatetime(before_ts).AndReturn(before_dt)
self.x._CertTimestampToDatetime(after_ts).AndReturn(after_dt)
self.x._AssembleDNSequence(seq[5]).AndReturn('CN=subject')
self.x._GetV3ExtensionFieldsFromSequence(seq[7]).AndReturn(v3ext)
x509.der_encoder.encode(seq).AndReturn(seq_encoded)
self.mox.ReplayAll()
output = self.x._GetFieldsFromSequence(seq)
self._CheckSaneCertFields(output)
self.assertEqual(
output, {
'serial_num': serial_num,
'issuer': u'CN=issuer',
'subject': u'CN=subject',
'valid_notbefore': before_dt,
'valid_notafter': after_dt,
'fields_data': seq_encoded,
'sig_algorithm': sig_alg,
'may_act_as_ca': v3ext['may_act_as_ca'],
'key_usage': v3ext['key_usage'],
'subject_alt_name': v3ext['subject_alt_name'],
})
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenSeqShort(self):
"""Test _GetFieldsFromSequence()."""
serial_num = 12345
seq = (
x509.X509_CERT_VERSION_3,
serial_num,
) # fails (length of entire sequence too short)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenWrongVersion(self):
"""Test _GetFieldsFromSequence()."""
seq = (
x509.X509_CERT_VERSION_3 * 2, # fails
1,
2,
3,
4,
5,
6,
)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenValidityNotBeforeFail(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a','b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
mock_utctime().AndReturn(mock_utctime)
before_ts.isSameTypeWith(mock_utctime).AndReturn(False) # fails
serial_num = 12345
bad_oid_cn = (9) * 10
seq = (
x509.X509_CERT_VERSION_3,
serial_num,
sig_alg_seq,
(((x509.OID_ID['CN'], 'issuer'),),),
(before_ts, after_ts),
(((x509.OID_ID['CN'], 'subject'),),),
'public key',
'x509v3 extensions',
)
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq[3]).AndReturn('CN=issuer')
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenValidityNotAfterFail(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a','b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
mock_utctime().AndReturn(mock_utctime)
mock_utctime().AndReturn(mock_utctime)
before_ts.isSameTypeWith(mock_utctime).AndReturn(True)
after_ts.isSameTypeWith(mock_utctime).AndReturn(False) # fails
serial_num = 12345
bad_oid_cn = (9) * 10
seq = (
x509.X509_CERT_VERSION_3,
serial_num,
sig_alg_seq,
(((x509.OID_ID['CN'], 'issuer'),),),
(before_ts, after_ts),
(((x509.OID_ID['CN'], 'subject'),),),
'public key',
'x509v3 extensions',
)
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq[3]).AndReturn('CN=issuer')
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetFieldsFromSequence, seq)
self.mox.VerifyAll()
def testGetFieldsFromSequenceWhenX509V3Missing(self):
"""Test _GetFieldsFromSequence()."""
sig_alg_seq = ('a','b')
sig_alg = 'sigalg'
before_ts = self.mox.CreateMockAnything()
after_ts = self.mox.CreateMockAnything()
mock_utctime = self.mox.CreateMockAnything()
self.stubs.Set(x509.pyasn1.type.useful, 'UTCTime', mock_utctime)
mock_utctime().AndReturn(mock_utctime)
mock_utctime().AndReturn(mock_utctime)
before_ts.isSameTypeWith(mock_utctime).AndReturn(True)
after_ts.isSameTypeWith(mock_utctime).AndReturn(True)
serial_num = 12345
v3ext = { 'may_act_as_ca': 123, 'key_usage': (1, 2, 3) }
seq = (
x509.X509_CERT_VERSION_3,
serial_num,
sig_alg_seq,
(((x509.OID_ID['CN'], 'issuer'),),),
(before_ts, after_ts),
(((x509.OID_ID['CN'], 'subject'),),),
'public key',
)
seq_encoded = 'raw bytes'
before_dt = 'before_dt'
after_dt = 'after_dt'
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_CertTimestampToDatetime')
self.mox.StubOutWithMock(self.x, '_AssembleDNSequence')
self.mox.StubOutWithMock(x509.der_encoder, 'encode', True)
self.x._GetSignatureAlgorithmFromSequence(
sig_alg_seq).AndReturn(sig_alg)
self.x._AssembleDNSequence(seq[3]).AndReturn('CN=issuer')
self.x._CertTimestampToDatetime(before_ts).AndReturn(before_dt)
self.x._CertTimestampToDatetime(after_ts).AndReturn(after_dt)
self.x._AssembleDNSequence(seq[5]).AndReturn('CN=subject')
x509.der_encoder.encode(seq).AndReturn(seq_encoded)
self.mox.ReplayAll()
output = self.x._GetFieldsFromSequence(seq)
self._CheckSaneCertFields(output)
self.assertEqual(
output, {
'serial_num': serial_num,
'issuer': 'CN=issuer',
'subject': 'CN=subject',
'valid_notbefore': before_dt,
'valid_notafter': after_dt,
'fields_data': seq_encoded,
'sig_algorithm': sig_alg,
})
self.mox.VerifyAll()
def testGetSignatureAlgorithmFromSequence(self):
"""Test _GetSignatureAlgorithmFromSequence()."""
alg = self.x.SIGNATURE_ALGORITHMS[0]
seq = (alg, '')
output = self.x._GetSignatureAlgorithmFromSequence(seq)
self._CheckSaneCertFields(output)
self.assertEqual(output['sig_algorithm'], alg)
def testGetSignatureAlgorithmFromSequenceWhenBadOID(self):
"""Test _GetSignatureAlgorithmFromSequence()."""
alg = (5, 4, 3, 2, 1) # fake OID
self.assertFalse(alg in self.x.SIGNATURE_ALGORITHMS)
seq = (alg, '')
self.assertRaises(
x509.CertificateValueError,
self.x._GetSignatureAlgorithmFromSequence, seq)
def testGetSignatureAlgorithmFromSequenceWhenJunkSeq(self):
"""Test _GetSignatureAlgorithmFromSequence()."""
alg = self.x.SIGNATURE_ALGORITHMS[0]
seq = (alg, '', '', '')
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureAlgorithmFromSequence, seq)
def testGetSignatureAlgorithmFromSequenceWhenJunk(self):
"""Test _GetSignatureAlgorithmFromSequence()."""
seq = True
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureAlgorithmFromSequence, seq)
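  # _GetSignatureFromSequence appears to expect a sequence of 1024 or 2048 bit
  # values (each 0 or 1); the tests below mock der_encoder.encode and check that
  # only the trailing bits/8 bytes are returned as the raw signature, and that
  # short or non-binary sequences are rejected.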
def testGetSignatureFromSequence(self):
"""Test _GetSignatureFromSequence()."""
bits = 1024
good_seq = [1] * bits
good_sig = (bits/8) * 'x'
self.mox.StubOutWithMock(x509.der_encoder, 'encode', True)
x509.der_encoder.encode(good_seq).AndReturn('junkJunkJUNK%s' % good_sig)
self.mox.ReplayAll()
output = self.x._GetSignatureFromSequence(good_seq)
self._CheckSaneCertFields(output)
self.assertEqual(output['sig_data'], good_sig)
self.mox.VerifyAll()
def testGetSignatureFromSequenceWhenShortSeq(self):
"""Test _GetSignatureFromSequence()."""
short_seq = [1] * 5
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureFromSequence, short_seq)
self.mox.VerifyAll()
def testGetSignatureFromSequenceWhenNonBinarySeq(self):
"""Test _GetSignatureFromSequence()."""
non_binary_seq = [2] * 2048
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureFromSequence, non_binary_seq)
self.mox.VerifyAll()
def testGetSignatureFromSequenceWhenJunkInput(self):
"""Test _GetSignatureFromSequence()."""
junk_seq = ['a'] * 1024
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateParseError,
self.x._GetSignatureFromSequence, junk_seq)
self.mox.VerifyAll()
def testGetCertSequencesFromTopSequence(self):
"""Test GetCertSequencesFromTopSequence()."""
seq = ((0, 1, 2),)
self.mox.StubOutWithMock(self.x, '_GetFieldsFromSequence')
self.mox.StubOutWithMock(self.x, '_GetSignatureAlgorithmFromSequence')
self.mox.StubOutWithMock(self.x, '_GetSignatureFromSequence')
self.x._GetFieldsFromSequence(seq[0][0]).AndReturn({'a':1})
self.x._GetSignatureAlgorithmFromSequence(seq[0][1]).AndReturn({'b':1})
self.x._GetSignatureFromSequence(seq[0][2]).AndReturn({'c':1})
self.mox.ReplayAll()
o = self.x._GetCertSequencesFromTopSequence(seq)
self.assertEqual(o, {'a':1, 'b':1, 'c':1})
self.mox.VerifyAll()
def testGetCertSequencesFromTopSequenceWhenBadTuple(self):
"""Test _GetCertSequencesFromTopSequence()."""
seq = ()
self.assertRaises(
x509.CertificateParseError,
self.x._GetCertSequencesFromTopSequence,
seq)
seq = 'not a tuple'
self.assertRaises(
x509.CertificateParseError,
self.x._GetCertSequencesFromTopSequence,
seq)
def testGetPublicKeyFromByteString(self):
"""Test _GetPublicKeyFromByteString()."""
bytes = 'bytes'
publickey = 'publickey'
self.mox.StubOutClassWithMocks(x509.tlslite_bridge, 'X509')
mock_tls509 = x509.tlslite_bridge.X509()
mock_tls509.parseBinary(bytes).AndReturn(None)
mock_tls509.publicKey = publickey
self.mox.ReplayAll()
self.assertEqual(
{'public_key': publickey},
self.x._GetPublicKeyFromByteString(bytes))
self.mox.VerifyAll()
def testLoadFromByteString(self):
"""Test LoadFromByteString()."""
self.x.Reset()
base_cert = self.x._cert
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
self.mox.StubOutWithMock(self.x, '_GetCertSequencesFromTopSequence')
self.mox.StubOutWithMock(self.x, '_GetPublicKeyFromByteString')
self.mox.StubOutWithMock(self.x, 'Reset')
bytes = 'bytes'
seq = 'seq'
certseq = {'certseq': 1}
pubkey = {'pubkey': 1}
cert = { 'entire_byte_string': bytes }
cert.update(base_cert)
cert.update(certseq)
cert.update(pubkey)
x509.der_decoder.decode(bytes).AndReturn(seq)
self.x._GetCertSequencesFromTopSequence(seq).AndReturn(certseq)
self.x._GetPublicKeyFromByteString(bytes).AndReturn(pubkey)
self.x.Reset().AndReturn(None)
self.mox.ReplayAll()
self.x.LoadFromByteString(bytes)
self.assertEqual(self.x._cert, cert)
self.mox.VerifyAll()
def testLoadFromByteStringWhenPyAsn1Error(self):
"""Test LoadFromByteString()."""
self.mox.StubOutWithMock(x509.der_decoder, 'decode', True)
bytes = 'bytes'
x509.der_decoder.decode(bytes).AndRaise(x509.pyasn1.error.PyAsn1Error)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateASN1FormatError,
self.x.LoadFromByteString, bytes)
self.mox.VerifyAll()
def testCheckValidityWhenObtainUtc(self):
"""Test CheckValidity()."""
mock_datetime = self.mox.CreateMock(x509.datetime.datetime)
self.stubs.Set(x509.datetime, 'datetime', mock_datetime)
mock_datetime.utcnow().AndReturn(2)
self.x._cert['valid_notafter'] = 5
self.x._cert['valid_notbefore'] = 0
self.mox.ReplayAll()
self.x.CheckValidity()
self.mox.VerifyAll()
def testCheckValidityWhenTooNew(self):
"""Test CheckValidity()."""
self.x._cert['valid_notafter'] = 1
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateError,
self.x.CheckValidity,
2)
self.mox.VerifyAll()
def testCheckValidityWhenTooOld(self):
"""Test CheckValidity()."""
self.x._cert['valid_notafter'] = 10
self.x._cert['valid_notbefore'] = 5
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateError,
self.x.CheckValidity,
2)
self.mox.VerifyAll()
def testCheckIssuerWhenNoIssuerSupplied(self):
"""Test CheckIssuer()."""
self.x._required_issuer = 'required'
self.x._cert['issuer'] = 'required'
self.mox.ReplayAll()
self.x.CheckIssuer()
self.mox.VerifyAll()
def testCheckIssuerWhenFailed(self):
"""Test CheckIssuer()."""
self.x._required_issuer = None
self.x._cert['issuer'] = 'required'
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateValueError,
self.x.CheckIssuer, 'some other issuer')
self.mox.VerifyAll()
def testCheckIssuerWhenNoRequirement(self):
"""Test CheckIssuer()."""
self.x._required_issuer = None
self.x._cert['issuer'] = 'no one cares'
self.mox.ReplayAll()
self.x.CheckIssuer()
self.mox.VerifyAll()
def testCheckAll(self):
"""Test CheckAll()."""
self.mox.StubOutWithMock(self.x, 'CheckValidity')
self.mox.StubOutWithMock(self.x, 'CheckIssuer')
self.x.CheckValidity().AndReturn(None)
self.x.CheckIssuer().AndReturn(None)
self.mox.ReplayAll()
self.x.CheckAll()
self.mox.VerifyAll()
def testSetRequiredIssuer(self):
"""Test SetRequiredIssuer()."""
self.x.SetRequiredIssuer('required')
self.assertEqual(self.x._required_issuer, 'required')
def testIsSignedBy(self):
"""Test IsSignedBy()."""
self.mox.StubOutWithMock(tlslite_bridge, 'StrToArray')
self.mox.StubOutWithMock(self.x, 'GetSignatureData')
self.mox.StubOutWithMock(self.x, 'GetFieldsData')
mock_othercert = self.mox.CreateMockAnything()
mock_othercert.GetMayActAsCA().AndReturn(True)
mock_othercert.GetPublicKey().AndReturn(mock_othercert) # lazy re-use
self.x.GetSignatureData().AndReturn('sigdata')
self.x.GetFieldsData().AndReturn('fieldsdata')
tlslite_bridge.StrToArray('sigdata').AndReturn('arysigdata')
tlslite_bridge.StrToArray('fieldsdata').AndReturn('aryfieldsdata')
mock_othercert.hashAndVerify('arysigdata', 'aryfieldsdata').AndReturn(True)
self.mox.ReplayAll()
self.assertTrue(self.x.IsSignedBy(mock_othercert))
self.mox.VerifyAll()
def testIsSignedByWhenOtherCertNotCA(self):
"""Test IsSignedBy()."""
mock_othercert = self.mox.CreateMockAnything()
mock_othercert.GetMayActAsCA().AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(
x509.CertificateValueError,
self.x.IsSignedBy, mock_othercert)
self.mox.VerifyAll()
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
|
frlen/simian
|
src/tests/simian/auth/x509_test.py
|
Python
|
apache-2.0
| 37,278
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_pod_security_policy_list import V1beta1PodSecurityPolicyList
class TestV1beta1PodSecurityPolicyList(unittest.TestCase):
""" V1beta1PodSecurityPolicyList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1PodSecurityPolicyList(self):
"""
Test V1beta1PodSecurityPolicyList
"""
model = kubernetes.client.models.v1beta1_pod_security_policy_list.V1beta1PodSecurityPolicyList()
if __name__ == '__main__':
unittest.main()
|
djkonro/client-python
|
kubernetes/test/test_v1beta1_pod_security_policy_list.py
|
Python
|
apache-2.0
| 967
|
#__author__ = 'hello'
# -*- coding: cp936 -*-
import re
import os
import random
import json
import string
import ctypes
from myexception import *
PATH = './img/'
dm2 = ctypes.WinDLL('./CrackCaptchaAPI.dll')
if not os.path.exists('./img'):
os.mkdir('./img')
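# str_tr() obfuscates digits: each ASCII digit 0-9 is translated to a fixed
# uppercase letter (0->Q, 1->A, 2->E, ...), leaving all other characters unchanged.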
def str_tr(content):
instr = "0123456789"
outstr ="QAEDTGUJOL"
trantab = string.maketrans(instr,outstr)
return content.translate(trantab)
def getHid():
import wmi
m = wmi.WMI()
a = ''
b = ''
for cpu in m.Win32_Processor():
a = cpu.Processorid.strip()
for bd in m.Win32_BIOS():
b= bd.SerialNumber.strip()
return a+b
def getEightRandomString():
return ''.join(random.sample(string.ascii_letters,8))
def getCToken(content):
s = ''
pattern = re.compile('securityCToken = "([+-]?\d*)"')
match = pattern.search(content)
if match:
s = match.group(1)
return s
def GetCaptcha(content):
global PATH
filename = ''.join(random.sample(string.ascii_letters,8))
filename += '.jpg'
filename = PATH+filename
img = None
try:
img = open(filename,'wb')
img.write(content)
except IOError:
raise FileCanNotCreate('open file error')
finally:
if img:
img.close()
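    # CrackCaptchaAPI's D2File() is called as (key, user, password, image path,
    # timeout, id, result buffer), where id is presumably a captcha-type code.
    # A positive return value means the recognized text was written to the result
    # buffer; the negative codes below are mapped to D2FILE exceptions.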
dm2.D2File.argtypes=[ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_short, ctypes.c_int, ctypes.c_char_p]
dm2.D2File.restype = ctypes.c_int
key = ctypes.c_char_p('fa6fd217145f273b59d7e72c1b63386e')
id = ctypes.c_long(54)
user = ctypes.c_char_p('test')
pas = ctypes.c_char_p('test')
timeout = ctypes.c_short(30)
result = ctypes.create_string_buffer('/0'*100)
ret = -1
ret = dm2.D2File(key,user, pas, filename,timeout,id,(result))
if ret > 0:
return result.value
elif ret == -101:
        raise D2FILE(u'Insufficient balance, account needs topping up')
elif ret > -199:
raise D2FILE('user info error')
elif ret == -208:
raise D2FILE('software can not user')
elif ret == -210:
raise D2FILE('invalid user')
elif ret == -301:
raise D2FILE('can not find dll')
else:
        raise D2FILE(u'Recognition library error')
def GetTimeSlot(content,num):
try:
timeslot = json.loads(content)
slotLen = len(timeslot['timeSlots'])
if num < slotLen:
            return timeslot['timeSlots'][num]['startTime'], timeslot['timeSlots'][num]['timeslotID']
elif slotLen > 0:
            return timeslot['timeSlots'][slotLen-1]['startTime'], timeslot['timeSlots'][slotLen-1]['timeslotID']
except ValueError,e:
raise NoJsonData('')
def sendEmail(count):
import smtplib
from email.mime.text import MIMEText
from email.header import Header
smtpserver = 'smtp.163.com'
sender = 'sghcarbon@163.com'
receiver = 'sghcarbon@163.com'
    subject = u'Number of reservations'
user = 'sghcarbon'
pas = 'carbon216'
    content = getHid() + u' number of reservations: ' + str(count)
msg = MIMEText(content,'plain','utf-8')
msg['Subject'] = Header(subject,'utf-8')
msg['From'] = sender
msg['To'] = receiver
try:
send_smtp = smtplib.SMTP()
send_smtp.connect(smtpserver)
send_smtp.login(user,pas)
send_smtp.sendmail(sender,receiver,msg.as_string())
send_smtp.close()
print 'ok'
except:
print 'error'
|
dading/iphone_order
|
util.py
|
Python
|
apache-2.0
| 3,365
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test alter op layout pass"""
import tvm
from tvm import relay
from tvm.relay.op import register_alter_op_layout
from tvm.relay import transform, analysis
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = relay.Module.from_expr(expr)
seq = transform.Sequential(passes)
with transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
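# Each test below builds a 'before' function, runs the AlterOpLayout pass
# (preceded by CanonicalizeOps where bias_add/broadcast ops are involved), and
# asserts alpha-equality against a hand-written 'expected' function that spells
# out the layout_transform ops the pass is expected to insert.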
def test_alter_op():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var('weight', shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight,
channels=64,
kernel_size=(3, 3),
padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
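    # Note: each test in this file registers its alter_conv2d hook at a distinct,
    # increasing level, presumably so that the latest registration takes
    # precedence over hooks registered by earlier tests.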
@register_alter_op_layout("nn.conv2d", level=100)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var('weight', shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = run_opt_pass(expected(), transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_return_none():
"""Test doing nothing by returning 'None' """
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.nn.global_max_pool2d(x)
y = relay.Function([x], y)
return y
called = [False]
@register_alter_op_layout("nn.global_max_pool2d", level=101)
def alter_conv2d(attrs, inputs, tinfos):
called[0] = True
return None
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = before()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
assert(called[0])
def test_alter_layout():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias")
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.bias_add(y, bias)
# a useless tuple, which will be eliminated
y = relay.Tuple([y])[0]
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2))
y = relay.cast(y, 'int32')
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
@register_alter_op_layout("nn.conv2d", level=102)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
new_attrs['kernel_layout'] = 'OIHW16i'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.layout_transform(x, "NCHW", "NCHW16c")
w = relay.layout_transform(weight, "OIHW", "OIHW16i")
y = relay.nn.conv2d(y, w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
kernel_layout="OIHW16i",
data_layout="NCHW16c")
b = relay.expand_dims(bias, axis=1, num_newaxis=2)
b = relay.layout_transform(b, "CHW", "CHW16c")
y = relay.add(y, b)
y = relay.nn.relu(y)
y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW16c")
y = relay.cast(y, 'int32')
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.nn.batch_flatten(y)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(),
transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_dual_path():
"""
    Test altering the layout with two outputs.
    One path continues to use the new layout while the other falls back to the old layout.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var('weight1')
weight2 = relay.var('weight2')
y = relay.nn.conv2d(x, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1))
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(y, weight2,
channels=32,
kernel_size=(3, 3),
padding=(1, 1))
y1 = relay.nn.relu(y1)
y2 = relay.nn.batch_flatten(y)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
@register_alter_op_layout("nn.conv2d", level=103)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var('weight1')
weight2 = relay.var('weight2')
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(y, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW16c")
y = relay.nn.relu(y)
y1 = relay.nn.conv2d(y, weight2,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout='NCHW16c')
y1 = relay.nn.relu(y1)
y1 = relay.layout_transform(y1, "NCHW16c", "NCHW")
y2 = relay.layout_transform(y, "NCHW16c", "NCHW")
y2 = relay.nn.batch_flatten(y2)
ret = relay.Tuple([y1, y2])
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_resnet():
"""Test alternating the layout of a residual block
This also tests the elimination of duplicated transformation.
If a same transformation applies to a same node twice, only one transformation will be created.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var('weight1')
weight2 = relay.var('weight2')
y = relay.nn.conv2d(x, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1))
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2,
channels=32,
kernel_size=(1, 1))
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y)
return relay.Function(analysis.free_vars(y), y)
@register_alter_op_layout("nn.conv2d", level=104)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var('weight1')
weight2 = relay.var('weight2')
x = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(x, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW16c")
y = relay.nn.relu(y)
y2 = relay.nn.conv2d(x, weight2,
channels=32,
kernel_size=(1, 1),
data_layout='NCHW16c')
y2 = relay.nn.relu(y2)
y = y + y2
y = relay.nn.global_max_pool2d(y, layout="NCHW16c")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
return relay.Function(analysis.free_vars(y), y)
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_broadcast_op():
"""Test boradcast operators """
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
scale = relay.var("scale", shape=(64, 1, 1))
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.bias_add(y, bias) # test broadcasting to lhs
y = relay.multiply(scale, y) # test broadcasting to rhs
y = relay.Function(analysis.free_vars(y), y)
return y
@register_alter_op_layout("nn.conv2d", level=105)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
bias = relay.var("bias", shape=(64,))
scale = relay.var("scale", shape=(64, 1, 1))
weight = relay.var("weight")
x = relay.layout_transform(x, "NCHW", "NCHW16c")
bias = relay.expand_dims(bias, 1, 2)
bias = relay.layout_transform(bias, "CHW", "CHW16c")
scale = relay.layout_transform(scale, "CHW", "CHW16c")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1),
data_layout="NCHW16c")
y = relay.add(y, bias) # test broadcasting to lhs
y = relay.multiply(scale, y) # test broadcasting to rhs
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(),
transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_scalar():
"""Test alternating the layout of a conv2d.
The layout of broadcast operators and the weight should be changed accordingly.
"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight")
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.add(y, relay.const(1, "float32"))
y = relay.Function(analysis.free_vars(y), y)
return y
@register_alter_op_layout("nn.conv2d", level=106)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
w = relay.var("weight")
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(y, w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW16c")
y = relay.add(y, relay.const(1.0, "float32"))
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(),
transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_concatenate():
""" """
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var('weight1')
weight2 = relay.var('weight2')
y = relay.nn.conv2d(x, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1))
y1 = relay.nn.conv2d(y, weight2,
channels=32,
kernel_size=(3, 3),
padding=(1, 1))
ret = relay.concatenate([y, y1], axis=1)
y = relay.Function(analysis.free_vars(ret), ret)
return y
@register_alter_op_layout("nn.conv2d", level=107)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight1 = relay.var('weight1')
weight2 = relay.var('weight2')
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(y, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW16c")
y1 = relay.nn.conv2d(y, weight2,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout='NCHW16c')
ret = relay.concatenate([y, y1], axis=1)
ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(ret), ret)
return y
a = before()
a = run_opt_pass(a, transform.AlterOpLayout())
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nchw_upsamping_op():
"""Test upsamping operators """
def before():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var('weight', shape=(32, 32, 3, 3))
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.upsampling(y, scale=2)
y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
y = relay.Function(analysis.free_vars(y), y)
return y
@register_alter_op_layout("nn.conv2d", level=108)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight")
x = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1),
data_layout="NCHW16c")
y = relay.nn.upsampling(y, scale=2, layout="NCHW16c")
y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout='NCHW16c')
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(),
transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_strided_slice():
"""Test rewriting strided_slice during alter_iop_layout"""
def before():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var('weight', shape=(32, 32, 3, 3))
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
y = relay.strided_slice(y, begin=[0, 16], end=[None, None])
y = relay.Function(analysis.free_vars(y), y)
return y
@register_alter_op_layout("nn.conv2d", level=109)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW4c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 32, 28, 28))
weight = relay.var("weight")
x = relay.layout_transform(x, "NCHW", "NCHW4c")
y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1),
data_layout="NCHW4c")
y = relay.strided_slice(y, begin=[0, 4], end=[None, 8])
y = relay.layout_transform(y, "NCHW4c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(),
transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_depthwise_conv2d():
"""Test depthwise_conv2d operator"""
def before():
x = relay.var("x", shape=(1, 32, 56, 56))
w = relay.var("w", shape=(32, 1, 3, 3))
y = relay.nn.conv2d(x, w, padding=(1, 1), channels=32, kernel_size=(3, 3), groups=32)
y = relay.Function(analysis.free_vars(y), y)
return y
import topi
@register_alter_op_layout("nn.conv2d", level=110)
def alter_conv2d(attrs, inputs, tinfos):
with tvm.target.create("llvm"):
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, relay)
def expected():
x = relay.var("x", shape=(1, 32, 56, 56))
w = relay.var("w", shape=(32, 1, 3, 3))
x = relay.layout_transform(x, "NCHW", "NCHW8c")
w = relay.layout_transform(w, "OIHW", "OIHW1i8o")
y = relay.nn.contrib_depthwise_conv2d_nchwc(x, w, padding=(1, 1), channels=32, kernel_size=(3, 3),
groups=32, data_layout="NCHW8c", kernel_layout="OIHW1i8o",
out_layout="NCHW8c")
y = relay.layout_transform(y, "NCHW8c", "NCHW")
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(),
transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert(analysis.alpha_equal(a, b))
def test_alter_layout_prelu():
"""Test PRelu operator"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight")
alpha = relay.var("alpha", relay.IncompleteType())
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.prelu(y, alpha)
y = relay.Function(analysis.free_vars(y), y)
return y
@register_alter_op_layout("nn.conv2d", level=111)
def alter_conv2d(attrs, inputs, tinfos):
data, weight = inputs
new_attrs = dict(attrs)
new_attrs['data_layout'] = 'NCHW16c'
return relay.nn.conv2d(data, weight, **new_attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
w = relay.var("weight")
alpha = relay.var("alpha", relay.IncompleteType())
y = relay.layout_transform(x, "NCHW", "NCHW16c")
y = relay.nn.conv2d(y, w,
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NCHW16c")
y = relay.layout_transform(y, "NCHW16c", "NCHW")
y = relay.nn.prelu(y, alpha)
y = relay.Function(analysis.free_vars(y), y)
return y
a = before()
a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
b = expected()
b = run_opt_pass(b, transform.InferType())
assert(analysis.alpha_equal(a, b))
if __name__ == "__main__":
test_alter_op()
test_alter_return_none()
test_alter_layout()
test_alter_layout_dual_path()
test_alter_layout_resnet()
test_alter_layout_broadcast_op()
test_alter_layout_scalar()
test_alter_layout_concatenate()
test_alter_layout_nchw_upsamping_op()
test_alter_layout_strided_slice()
test_alter_layout_depthwise_conv2d()
test_alter_layout_prelu()
|
Huyuwei/tvm
|
tests/python/relay/test_pass_alter_op_layout.py
|
Python
|
apache-2.0
| 22,068
|
"""Let's Encrypt client errors."""
class Error(Exception):
"""Generic Let's Encrypt client error."""
class AccountStorageError(Error):
"""Generic `.AccountStorage` error."""
class AccountNotFound(AccountStorageError):
"""Account not found error."""
class ReverterError(Error):
"""Let's Encrypt Reverter error."""
class SubprocessError(Error):
"""Subprocess handling error."""
class CertStorageError(Error):
"""Generic `.CertStorage` error."""
class HookCommandNotFound(Error):
"""Failed to find a hook command in the PATH."""
# Auth Handler Errors
class AuthorizationError(Error):
"""Authorization error."""
class FailedChallenges(AuthorizationError):
"""Failed challenges error.
:ivar set failed_achalls: Failed `.AnnotatedChallenge` instances.
"""
def __init__(self, failed_achalls):
assert failed_achalls
self.failed_achalls = failed_achalls
super(FailedChallenges, self).__init__()
def __str__(self):
return "Failed authorization procedure. {0}".format(
", ".join(
"{0} ({1}): {2}".format(achall.domain, achall.typ, achall.error)
for achall in self.failed_achalls if achall.error is not None))
# Plugin Errors
class PluginError(Error):
"""Let's Encrypt Plugin error."""
class PluginEnhancementAlreadyPresent(Error):
""" Enhancement was already set """
class PluginSelectionError(Error):
"""A problem with plugin/configurator selection or setup"""
class NoInstallationError(PluginError):
"""Let's Encrypt No Installation error."""
class MisconfigurationError(PluginError):
"""Let's Encrypt Misconfiguration error."""
class NotSupportedError(PluginError):
"""Let's Encrypt Plugin function not supported error."""
class StandaloneBindError(Error):
"""Standalone plugin bind error."""
def __init__(self, socket_error, port):
super(StandaloneBindError, self).__init__(
"Problem binding to port {0}: {1}".format(port, socket_error))
self.socket_error = socket_error
self.port = port
class ConfigurationError(Error):
"""Configuration sanity error."""
# NoninteractiveDisplay iDisplay plugin error:
class MissingCommandlineFlag(Error):
"""A command line argument was missing in noninteractive usage"""
|
mitnk/letsencrypt
|
letsencrypt/errors.py
|
Python
|
apache-2.0
| 2,342
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.Timeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from tensorflow.python.client import timeline
class TimelineTest(tf.test.TestCase):
def _validateTrace(self, chrome_trace_format):
# Check that the supplied string is valid JSON.
trace = json.loads(chrome_trace_format)
# It should have a top-level key containing events.
self.assertTrue('traceEvents' in trace)
# Every event in the list should have a 'ph' field.
for event in trace['traceEvents']:
self.assertTrue('ph' in event)
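  # Illustrative sketch (assumed, not emitted by this test): a minimal trace
  # accepted by _validateTrace would look like
  #   {"traceEvents": [{"name": "const1", "ph": "X", "ts": 0, "dur": 1,
  #                     "pid": 0, "tid": 0}]}
  # i.e. a top-level 'traceEvents' list whose entries each carry a 'ph' field.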
def testSimpleTimeline(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.device('/cpu:0'):
with tf.Session() as sess:
sess.run(
tf.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
def testTimelineCpu(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with self.test_session(use_gpu=False) as sess:
const1 = tf.constant(1.0, name='const1')
const2 = tf.constant(2.0, name='const2')
result = tf.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
def testTimelineGpu(self):
if not tf.test.is_gpu_available(cuda_only=True):
return
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with self.test_session(force_gpu=True) as sess:
const1 = tf.constant(1.0, name='const1')
const2 = tf.constant(2.0, name='const2')
result = tf.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/gpu:0' in devices)
self.assertTrue('/gpu:0/stream:all' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
def testAnalysisAndAllocations(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
config = tf.ConfigProto(device_count={'CPU': 3})
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
const1 = tf.constant(1.0, name='const1')
with tf.device('/cpu:1'):
const2 = tf.constant(2.0, name='const2')
with tf.device('/cpu:2'):
result = const1 + const2 + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
step_analysis = tl.analyze_step_stats()
ctf = step_analysis.chrome_trace.format_to_string()
self._validateTrace(ctf)
maximums = step_analysis.allocator_maximums
self.assertTrue('cpu' in maximums)
cpu_max = maximums['cpu']
# At least const1 + const2, both float32s (4 bytes each)
self.assertGreater(cpu_max.num_bytes, 8)
self.assertGreater(cpu_max.timestamp, 0)
self.assertTrue('const1' in cpu_max.tensors)
self.assertTrue('const2' in cpu_max.tensors)
def testManyCPUs(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
config = tf.ConfigProto(device_count={'CPU': 3})
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
const1 = tf.constant(1.0, name='const1')
with tf.device('/cpu:1'):
const2 = tf.constant(2.0, name='const2')
with tf.device('/cpu:2'):
result = const1 + const2 + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/cpu:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/cpu:2' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
if __name__ == '__main__':
tf.test.main()
|
sandeepdsouza93/TensorFlow-15712
|
tensorflow/python/client/timeline_test.py
|
Python
|
apache-2.0
| 7,065
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import HTTP_SUCCESS
QUOTA_BYTES = 10
QUOTA_COUNT = 3
SKIP_MSG = "Container quotas middleware not available."
class ContainerQuotasTest(base.BaseObjectTest):
"""Attemps to test the perfect behavior of quotas in a container."""
container_quotas_available = \
config.TempestConfig().object_storage_feature_enabled.container_quotas
def setUp(self):
"""Creates and sets a container with quotas.
Quotas are set by adding meta values to the container,
and are validated when set:
- X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
self.container_name = data_utils.rand_name(name="TestContainer")
self.container_client.create_container(self.container_name)
metadata = {"quota-bytes": str(QUOTA_BYTES),
"quota-count": str(QUOTA_COUNT), }
self.container_client.update_container_metadata(
self.container_name, metadata)
def tearDown(self):
"""Cleans the container of any object after each test."""
self.delete_containers([self.container_name])
super(ContainerQuotasTest, self).tearDown()
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
def test_upload_valid_object(self):
"""Attempts to uploads an object smaller than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES)
nbefore = self._get_bytes_used()
resp, _ = self.object_client.create_object(
self.container_name, object_name, data)
self.assertIn(int(resp['status']), HTTP_SUCCESS)
nafter = self._get_bytes_used()
self.assertEqual(nbefore + len(data), nafter)
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
def test_upload_large_object(self):
"""Attempts to upload an object lagger than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES + 1)
nbefore = self._get_bytes_used()
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
def test_upload_too_many_objects(self):
"""Attempts to upload many objects that exceeds the count limit."""
for _ in range(QUOTA_COUNT):
name = data_utils.rand_name(name="TestObject")
self.object_client.create_object(self.container_name, name, "")
nbefore = self._get_object_count()
self.assertEqual(nbefore, QUOTA_COUNT)
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, "OverQuotaObject", "")
nafter = self._get_object_count()
self.assertEqual(nbefore, nafter)
def _get_container_metadata(self):
resp, _ = self.container_client.list_container_metadata(
self.container_name)
return resp
def _get_object_count(self):
resp = self._get_container_metadata()
return int(resp["x-container-object-count"])
def _get_bytes_used(self):
resp = self._get_container_metadata()
return int(resp["x-container-bytes-used"])
|
eltonkevani/tempest_el_env
|
tempest/api/object_storage/test_container_quotas.py
|
Python
|
apache-2.0
| 4,616
|
from .Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'AwaitExpr': 249,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,
'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
# removed: 'FunctionCallArgument': 96,
'TupleExprElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
# removed: 'FunctionCallArgumentList': 164,
'TupleExprElementList': 165,
'ArrayElementList': 166,
'DictionaryElementList': 167,
'StringLiteralSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
'PoundAssertStmt': 229,
'SomeType': 230,
'CustomAttribute': 231,
'GenericRequirement': 232,
'DifferentiableAttributeArguments': 233,
'DifferentiabilityParamsClause': 234,
'DifferentiabilityParams': 235,
'DifferentiabilityParamList': 236,
'DifferentiabilityParam': 237,
# removed: 'DifferentiableAttributeFuncSpecifier': 238,
'FunctionDeclName': 239,
'PoundFilePathExpr': 240,
'DerivativeRegistrationAttributeArguments': 241,
'QualifiedDeclName': 242,
'CatchItem': 243,
'CatchItemList': 244,
'MultipleTrailingClosureElementList': 245,
'MultipleTrailingClosureElement': 246,
'PoundFileIDExpr': 247,
'TargetFunctionEntry': 248,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
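# Example (illustrative): if two syntax kinds were both mapped to, say, 150,
# verify_syntax_node_serialization_codes() would hit the duplicate check above
# and call error("Serialization code 150 used twice") on the second occurrence.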
def get_serialization_code(syntax_kind):
return SYNTAX_NODE_SERIALIZATION_CODES[syntax_kind]
|
atrick/swift
|
utils/gyb_syntax_support/NodeSerializationCodes.py
|
Python
|
apache-2.0
| 7,910
|
"""Support for RFXtrx devices."""
import binascii
from collections import OrderedDict
import logging
import RFXtrx as rfxtrxmod
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
ATTR_STATE,
CONF_DEVICE,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
POWER_WATT,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
UV_INDEX,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
DOMAIN = "rfxtrx"
DEFAULT_SIGNAL_REPETITIONS = 1
ATTR_AUTOMATIC_ADD = "automatic_add"
ATTR_DEVICE = "device"
ATTR_DEBUG = "debug"
ATTR_FIRE_EVENT = "fire_event"
ATTR_DATA_TYPE = "data_type"
ATTR_DUMMY = "dummy"
CONF_DATA_BITS = "data_bits"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_DATA_TYPE = "data_type"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_FIRE_EVENT = "fire_event"
CONF_DUMMY = "dummy"
CONF_DEBUG = "debug"
CONF_OFF_DELAY = "off_delay"
EVENT_BUTTON_PRESSED = "button_pressed"
DATA_TYPES = OrderedDict(
[
("Temperature", TEMP_CELSIUS),
("Temperature2", TEMP_CELSIUS),
("Humidity", UNIT_PERCENTAGE),
("Barometer", ""),
("Wind direction", ""),
("Rain rate", ""),
("Energy usage", POWER_WATT),
("Total usage", POWER_WATT),
("Sound", ""),
("Sensor Status", ""),
("Counter value", ""),
("UV", UV_INDEX),
("Humidity status", ""),
("Forecast", ""),
("Forecast numeric", ""),
("Rain total", ""),
("Wind average speed", ""),
("Wind gust", ""),
("Chill", ""),
("Total usage", ""),
("Count", ""),
("Current Ch. 1", ""),
("Current Ch. 2", ""),
("Current Ch. 3", ""),
("Energy usage", ""),
("Voltage", ""),
("Current", ""),
("Battery numeric", ""),
("Rssi numeric", ""),
]
)
RECEIVED_EVT_SUBSCRIBERS = []
RFX_DEVICES = {}
_LOGGER = logging.getLogger(__name__)
DATA_RFXOBJECT = "rfxobject"
BASE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEBUG, default=False): cv.boolean,
vol.Optional(CONF_DUMMY, default=False): cv.boolean,
}
)
DEVICE_SCHEMA = BASE_SCHEMA.extend({vol.Required(CONF_DEVICE): cv.string})
PORT_SCHEMA = BASE_SCHEMA.extend(
{vol.Required(CONF_PORT): cv.port, vol.Optional(CONF_HOST): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Any(DEVICE_SCHEMA, PORT_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
def setup(hass, config):
"""Set up the RFXtrx component."""
    # Declare the event handler
def handle_receive(event):
"""Handle received messages from RFXtrx gateway."""
# Log RFXCOM event
if not event.device.id_string:
return
_LOGGER.debug(
"Receive RFXCOM event from "
"(Device_id: %s Class: %s Sub: %s, Pkt_id: %s)",
slugify(event.device.id_string.lower()),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
# Callback to HA registered components.
for subscriber in RECEIVED_EVT_SUBSCRIBERS:
subscriber(event)
device = config[DOMAIN].get(ATTR_DEVICE)
host = config[DOMAIN].get(CONF_HOST)
port = config[DOMAIN].get(CONF_PORT)
debug = config[DOMAIN][ATTR_DEBUG]
dummy_connection = config[DOMAIN][ATTR_DUMMY]
if dummy_connection:
rfx_object = rfxtrxmod.Connect(
device, None, debug=debug, transport_protocol=rfxtrxmod.DummyTransport2,
)
elif port is not None:
# If port is set then we create a TCP connection
rfx_object = rfxtrxmod.Connect(
(host, port),
None,
debug=debug,
transport_protocol=rfxtrxmod.PyNetworkTransport,
)
else:
rfx_object = rfxtrxmod.Connect(device, None, debug=debug)
def _start_rfxtrx(event):
rfx_object.event_callback = handle_receive
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_rfxtrx)
def _shutdown_rfxtrx(event):
"""Close connection with RFXtrx."""
rfx_object.close_connection()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx)
hass.data[DATA_RFXOBJECT] = rfx_object
return True
def get_rfx_object(packetid):
"""Return the RFXObject with the packetid."""
try:
binarypacket = bytearray.fromhex(packetid)
except ValueError:
return None
pkt = rfxtrxmod.lowlevel.parse(binarypacket)
if pkt is None:
return None
if isinstance(pkt, rfxtrxmod.lowlevel.SensorPacket):
obj = rfxtrxmod.SensorEvent(pkt)
elif isinstance(pkt, rfxtrxmod.lowlevel.Status):
obj = rfxtrxmod.StatusEvent(pkt)
else:
obj = rfxtrxmod.ControlEvent(pkt)
return obj
def get_pt2262_deviceid(device_id, nb_data_bits):
"""Extract and return the address bits from a Lighting4/PT2262 packet."""
if nb_data_bits is None:
return
try:
data = bytearray.fromhex(device_id)
except ValueError:
return None
mask = 0xFF & ~((1 << nb_data_bits) - 1)
data[len(data) - 1] &= mask
return binascii.hexlify(data)
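# Worked example (illustrative values, not part of the original module):
# get_pt2262_deviceid("cf0f4e", 4) builds bytearray [0xcf, 0x0f, 0x4e], masks
# the last byte with 0xFF & ~0x0F == 0xF0 (0x4e & 0xF0 == 0x40) and returns
# b'cf0f40' -- the PT2262 address with its 4 data bits cleared.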
def get_pt2262_cmd(device_id, data_bits):
"""Extract and return the data bits from a Lighting4/PT2262 packet."""
try:
data = bytearray.fromhex(device_id)
except ValueError:
return None
mask = 0xFF & ((1 << data_bits) - 1)
return hex(data[-1] & mask)
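# Worked example (illustrative values): for the same packet,
# get_pt2262_cmd("cf0f4e", 4) masks the last byte with 0x0F, so
# 0x4e & 0x0F == 0xe and the returned command is the string '0xe'.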
def get_pt2262_device(device_id):
"""Look for the device which id matches the given device_id parameter."""
for device in RFX_DEVICES.values():
if (
hasattr(device, "is_lighting4")
and device.masked_id is not None
and device.masked_id == get_pt2262_deviceid(device_id, device.data_bits)
):
_LOGGER.debug(
"rfxtrx: found matching device %s for %s", device_id, device.masked_id,
)
return device
return None
def find_possible_pt2262_device(device_id):
"""Look for the device which id matches the given device_id parameter."""
for dev_id, device in RFX_DEVICES.items():
if hasattr(device, "is_lighting4") and len(dev_id) == len(device_id):
size = None
for i, (char1, char2) in enumerate(zip(dev_id, device_id)):
if char1 != char2:
break
size = i
if size is not None:
size = len(dev_id) - size - 1
_LOGGER.info(
"rfxtrx: found possible device %s for %s "
"with the following configuration:\n"
"data_bits=%d\n"
"command_on=0x%s\n"
"command_off=0x%s\n",
device_id,
dev_id,
size * 4,
dev_id[-size:],
device_id[-size:],
)
return device
return None
def get_devices_from_config(config, device):
"""Read rfxtrx configuration."""
signal_repetitions = config[CONF_SIGNAL_REPETITIONS]
devices = []
for packet_id, entity_info in config[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("Invalid device: %s", packet_id)
continue
device_id = slugify(event.device.id_string.lower())
if device_id in RFX_DEVICES:
continue
_LOGGER.debug("Add %s rfxtrx", entity_info[ATTR_NAME])
        # Check if it must fire an event
fire_event = entity_info[ATTR_FIRE_EVENT]
datas = {ATTR_STATE: False, ATTR_FIRE_EVENT: fire_event}
new_device = device(entity_info[ATTR_NAME], event, datas, signal_repetitions)
RFX_DEVICES[device_id] = new_device
devices.append(new_device)
return devices
def get_new_device(event, config, device):
"""Add entity if not exist and the automatic_add is True."""
device_id = slugify(event.device.id_string.lower())
if device_id in RFX_DEVICES:
return
if not config[ATTR_AUTOMATIC_ADD]:
return
pkt_id = "".join(f"{x:02x}" for x in event.data)
_LOGGER.debug(
"Automatic add %s rfxtrx device (Class: %s Sub: %s Packet_id: %s)",
device_id,
event.device.__class__.__name__,
event.device.subtype,
pkt_id,
)
datas = {ATTR_STATE: False, ATTR_FIRE_EVENT: False}
signal_repetitions = config[CONF_SIGNAL_REPETITIONS]
new_device = device(pkt_id, event, datas, signal_repetitions)
RFX_DEVICES[device_id] = new_device
return new_device
def apply_received_command(event):
"""Apply command from rfxtrx."""
device_id = slugify(event.device.id_string.lower())
    # Check if the entity exists or was previously added automatically
if device_id not in RFX_DEVICES:
return
_LOGGER.debug(
"Device_id: %s device_update. Command: %s", device_id, event.values["Command"],
)
if event.values["Command"] in [
"On",
"Off",
"Up",
"Down",
"Stop",
"Open (inline relay)",
"Close (inline relay)",
"Stop (inline relay)",
]:
# Update the rfxtrx device state
command = event.values["Command"]
if command in [
"On",
"Up",
"Stop",
"Open (inline relay)",
"Stop (inline relay)",
]:
is_on = True
elif command in ["Off", "Down", "Close (inline relay)"]:
is_on = False
RFX_DEVICES[device_id].update_state(is_on)
elif (
hasattr(RFX_DEVICES[device_id], "brightness")
and event.values["Command"] == "Set level"
):
_brightness = event.values["Dim level"] * 255 // 100
# Update the rfxtrx device state
is_on = _brightness > 0
RFX_DEVICES[device_id].update_state(is_on, _brightness)
# Fire event
if RFX_DEVICES[device_id].should_fire_event:
RFX_DEVICES[device_id].hass.bus.fire(
EVENT_BUTTON_PRESSED,
{
ATTR_ENTITY_ID: RFX_DEVICES[device_id].entity_id,
ATTR_STATE: event.values["Command"].lower(),
},
)
_LOGGER.debug(
"Rfxtrx fired event: (event_type: %s, %s: %s, %s: %s)",
EVENT_BUTTON_PRESSED,
ATTR_ENTITY_ID,
RFX_DEVICES[device_id].entity_id,
ATTR_STATE,
event.values["Command"].lower(),
)
class RfxtrxDevice(Entity):
"""Represents a Rfxtrx device.
Contains the common logic for Rfxtrx lights and switches.
"""
def __init__(self, name, event, datas, signal_repetitions):
"""Initialize the device."""
self.signal_repetitions = signal_repetitions
self._name = name
self._event = event
self._state = datas[ATTR_STATE]
self._should_fire_event = datas[ATTR_FIRE_EVENT]
self._brightness = 0
self._unique_id = f"{slugify(self._event.device.type_string.lower())}_{slugify(self._event.device.id_string.lower())}"
self.added_to_hass = False
async def async_added_to_hass(self):
"""Subscribe RFXtrx events."""
self.added_to_hass = True
@property
def should_poll(self):
"""No polling needed for a RFXtrx switch."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def should_fire_event(self):
"""Return is the device must fire event."""
return self._should_fire_event
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def unique_id(self):
"""Return unique identifier of remote device."""
return self._unique_id
def turn_off(self, **kwargs):
"""Turn the device off."""
self._send_command("turn_off")
def update_state(self, state, brightness=0):
"""Update det state of the device."""
self._state = state
self._brightness = brightness
if self.added_to_hass:
self.schedule_update_ha_state()
def _send_command(self, command, brightness=0):
if not self._event:
return
rfx_object = self.hass.data[DATA_RFXOBJECT]
if command == "turn_on":
for _ in range(self.signal_repetitions):
self._event.device.send_on(rfx_object.transport)
self._state = True
elif command == "dim":
for _ in range(self.signal_repetitions):
self._event.device.send_dim(rfx_object.transport, brightness)
self._state = True
elif command == "turn_off":
for _ in range(self.signal_repetitions):
self._event.device.send_off(rfx_object.transport)
self._state = False
self._brightness = 0
elif command == "roll_up":
for _ in range(self.signal_repetitions):
self._event.device.send_open(rfx_object.transport)
self._state = True
elif command == "roll_down":
for _ in range(self.signal_repetitions):
self._event.device.send_close(rfx_object.transport)
self._state = False
elif command == "stop_roll":
for _ in range(self.signal_repetitions):
self._event.device.send_stop(rfx_object.transport)
self._state = True
if self.added_to_hass:
self.schedule_update_ha_state()
|
nkgilley/home-assistant
|
homeassistant/components/rfxtrx/__init__.py
|
Python
|
apache-2.0
| 13,930
|
def split_camel_case(input):
def remove_camel_case(camel_case_input):
no_camel_case = ""
if len(camel_case_input) <= 0:
return ""
no_camel_case += camel_case_input[0].lower()
for c in camel_case_input[1:]:
if c.isupper():
no_camel_case += "_" + c.lower()
else:
no_camel_case += c
return no_camel_case
underscore_split = input.split("_")
retval = ""
for i in underscore_split:
if is_camel_case_name(i):
retval += remove_camel_case(i) + "_"
else:
retval += i + "_"
return retval[:-1].replace("__", "_")
def is_camel_case_name(input):
if '_' in input:
return False
if input.islower():
return False
if input.isupper():
return False
return True
|
blxble/mesh-core-on-nrf
|
nrf5_sdk/external/nano-pb/generator/camel_case_splitter.py
|
Python
|
apache-2.0
| 854
|
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps a persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class RestProxyCallbacks(n_rpc.RpcCallback,
sg_rpc_base.SecurityGroupServerRpcCallbackMixin,
dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def get_port_from_device(self, device):
port_id = re.sub(r"^tap", "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
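    # Example (illustrative): for a tap device name such as "tapa1b2c3d4", the
    # leading "tap" is stripped and "a1b2c3d4" is used as the port-id prefix
    # that get_port_and_sgs() matches with Port.id.startswith().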
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers:
routers = []
all_routers = self.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
        This gives the controller an option to re-sync its persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = self.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
        if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
else:
hostid = porttracker_db.get_port_hostid(context, port['id'])
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: True
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
            # would have deleted a non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
self.servers.set_context(context)
return f(self, context, *args, **kwargs)
return wrapper
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
sg_rpc_base.SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "router", "binding",
"router_rules", "extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
version.version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the Big Switch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool()
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug(_("NeutronRestProxyV2: initialization done"))
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.endpoints = [RestProxyCallbacks(),
agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
@put_context_in_serverpool
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
@put_context_in_serverpool
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
@put_context_in_serverpool
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach to a L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID"s and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_port() called"))
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# non-router port status is set to pending. it is then updated
# after the async rest call completes. router ports are synchronous
if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
port['port']['status'] = const.PORT_STATUS_ACTIVE
else:
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
# ports have to be created synchronously when creating a router
# port since adding router interfaces is a multi-call process
if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
self.servers.rest_create_port(net["tenant_id"],
new_port["network_id"],
mapped_port)
else:
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
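        """Retrieve ports matching the filters, each extended with binding attributes."""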
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
@put_context_in_serverpool
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID's and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
agent_update_required = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
agent_update_required |= self.is_security_group_member_updated(
context, orig_port, new_port)
# return new_port
return new_port
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
        # If needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
router_ids = self.disassociate_floatingips(
context, port_id, do_notify=False)
self._delete_port_security_group_bindings(context, port_id)
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenid = self._get_port_net_tenantid(context, port)
self._delete_port(context, port_id)
self.servers.rest_delete_port(tenid, port['network_id'], port_id)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
            # update network on network controller - an exception here rolls
            # back the subnet deletion above
self._send_update_network(orig_net, context)
def _get_tenant_default_router_rules(self, tenant):
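        """Build the default router-rule set for a tenant from config.

        Each entry in ROUTER.tenant_default_router_rule is a colon-separated
        string 'tenant:source:destination:action[:nexthops]', with nexthops
        comma-separated and '*' acting as the wildcard tenant. Tenant-specific
        rules, when present, override the '*' defaults. Illustrative value
        (field values are examples only): '*:any:any:permit'.
        """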
rules = cfg.CONF.ROUTER.tenant_default_router_rule
defaultset = []
tenantset = []
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
@put_context_in_serverpool
def create_router(self, context, router):
LOG.debug(_("NeutronRestProxyV2: create_router() called"))
self._warn_on_state_status(router['router'])
tenant_id = self._get_tenant_id_for_create(context, router["router"])
# set default router rules
rules = self._get_tenant_default_router_rules(tenant_id)
router['router']['router_rules'] = rules
with context.session.begin(subtransactions=True):
# create router in DB
new_router = super(NeutronRestProxyV2, self).create_router(context,
router)
mapped_router = self._map_state_and_status(new_router)
self.servers.rest_create_router(tenant_id, mapped_router)
# return created router
return new_router
@put_context_in_serverpool
def update_router(self, context, router_id, router):
LOG.debug(_("NeutronRestProxyV2.update_router() called"))
self._warn_on_state_status(router['router'])
orig_router = super(NeutronRestProxyV2, self).get_router(context,
router_id)
tenant_id = orig_router["tenant_id"]
with context.session.begin(subtransactions=True):
new_router = super(NeutronRestProxyV2,
self).update_router(context, router_id, router)
router = self._map_state_and_status(new_router)
# update router on network controller
self.servers.rest_update_router(tenant_id, router, router_id)
# return updated router
return new_router
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock.
# delete_router ends up calling _delete_port instead of delete_port.
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_router(self, context, router_id):
LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
# Ensure that the router is not used
router_filter = {'router_id': [router_id]}
fips = self.get_floatingips_count(context.elevated(),
filters=router_filter)
if fips:
raise l3.RouterInUse(router_id=router_id)
device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
device_filter = {'device_id': [router_id],
'device_owner': [device_owner]}
ports = self.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
ret_val = super(NeutronRestProxyV2,
self).delete_router(context, router_id)
# delete from network ctrl
self.servers.rest_delete_router(tenant_id, router_id)
return ret_val
@put_context_in_serverpool
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
with context.session.begin(subtransactions=True):
# create interface in DB
new_intf_info = super(NeutronRestProxyV2,
self).add_router_interface(context,
router_id,
interface_info)
port = self._get_port(context, new_intf_info['port_id'])
net_id = port['network_id']
subnet_id = new_intf_info['subnet_id']
# we will use the port's network id as interface's id
interface_id = net_id
intf_details = self._get_router_intf_details(context,
interface_id,
subnet_id)
# create interface on the network controller
self.servers.rest_add_router_interface(tenant_id, router_id,
intf_details)
return new_intf_info
@put_context_in_serverpool
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# we will first get the interface identifier before deleting in the DB
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port = self._get_port(context, interface_info['port_id'])
interface_id = port['network_id']
elif 'subnet_id' in interface_info:
subnet = self._get_subnet(context, interface_info['subnet_id'])
interface_id = subnet['network_id']
else:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
            # remove router interface in DB
del_ret = super(NeutronRestProxyV2,
self).remove_router_interface(context,
router_id,
interface_info)
            # remove the router interface on the network controller
self.servers.rest_remove_router_interface(tenant_id, router_id,
interface_id)
return del_ret
@put_context_in_serverpool
def create_floatingip(self, context, floatingip):
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
with context.session.begin(subtransactions=True):
# create floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).create_floatingip(context, floatingip)
# create floatingip on the network controller
try:
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_create_floatingip(
new_fl_ip['tenant_id'], new_fl_ip)
else:
self._send_floatingip_update(context)
except servermanager.RemoteRestError as e:
with excutils.save_and_reraise_exception():
LOG.error(
_("NeutronRestProxyV2: Unable to create remote "
"floating IP: %s"), e)
# return created floating IP
return new_fl_ip
@put_context_in_serverpool
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
with context.session.begin(subtransactions=True):
# update floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).update_floatingip(context, id, floatingip)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
new_fl_ip, id)
else:
self._send_floatingip_update(context)
return new_fl_ip
@put_context_in_serverpool
def delete_floatingip(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
with context.session.begin(subtransactions=True):
# delete floating IP in DB
old_fip = super(NeutronRestProxyV2, self).get_floatingip(context,
id)
super(NeutronRestProxyV2, self).delete_floatingip(context, id)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
else:
self._send_floatingip_update(context)
@put_context_in_serverpool
def disassociate_floatingips(self, context, port_id, do_notify=True):
LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called"))
router_ids = super(NeutronRestProxyV2, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
self._send_floatingip_update(context)
return router_ids
# overriding method from l3_db as original method calls
# self.delete_floatingip() which in turn calls self.delete_port() which
# is locked with 'bsn-port-barrier'
@put_context_in_serverpool
def delete_disassociated_floatingips(self, context, network_id):
query = self._model_query(context, l3_db.FloatingIP)
query = query.filter_by(floating_network_id=network_id,
fixed_port_id=None,
router_id=None)
for fip in query:
context.session.delete(fip)
self._delete_port(context.elevated(), fip['floating_port_id'])
def _send_floatingip_update(self, context):
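        """Propagate floating IP changes by re-sending the external network."""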
try:
ext_net_id = self.get_external_network_id(context)
if ext_net_id:
# Use the elevated state of the context for the ext_net query
admin_context = context.elevated()
ext_net = super(NeutronRestProxyV2,
self).get_network(admin_context, ext_net_id)
# update external network on network controller
self._send_update_network(ext_net, admin_context)
except exceptions.TooManyExternalNetworks:
# get_external_network can raise errors when multiple external
# networks are detected, which isn't supported by the Plugin
LOG.error(_("NeutronRestProxyV2: too many external networks"))
def _add_host_route(self, context, destination, port):
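        """Add a host route to ``destination`` via the port's fixed-IP subnets.

        Used to route metadata-server traffic through the DHCP port.
        """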
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})
|
shakamunyi/neutron-dvr
|
neutron/plugins/bigswitch/plugin.py
|
Python
|
apache-2.0
| 51,022
|
# -*- coding: utf-8 -*-
#
# fi-ware-chef_validator documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 15 12:28:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fi-ware-chef_validator'
copyright = u'2015, Pedro Verdugo'
author = u'Pedro Verdugo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from chef_validator.tests.unit.version import version_info
# The short X.Y version.
version = version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# Set the default Pygments syntax
highlight_language = 'python'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fi-ware-chef_validatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fi-ware-chef_validator.tex',
u'fi-ware-chef\\_validator Documentation',
u'Pedro Verdugo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fi-ware-chef_validator',
u'fi-ware-chef_validator Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fi-ware-chef_validator',
u'fi-ware-chef_validator Documentation',
author, 'fi-ware-chef_validator', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
ging/fiware-chef_validator
|
doc/source/conf.py
|
Python
|
apache-2.0
| 9,737
|
# Copyright 2017 OP5 AB
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for policy unit tests."""
import os
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_policy import opts as policy_opts
from oslo_serialization import jsonutils
from oslotest import base
from monasca_api.common.policy import policy_engine
CONF = cfg.CONF
class FakePolicy(object):
def list_rules(self):
return []
class ConfigFixture(config_fixture.Config):
def setUp(self):
super(ConfigFixture, self).setUp()
CONF(args=[],
prog='api',
project='monasca',
version=0,
description='Testing monasca-api.common')
policy_opts.set_defaults(CONF)
class BaseTestCase(base.BaseTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(ConfigFixture(CONF))
self.useFixture(EmptyPolicyFixture())
@staticmethod
def conf_override(**kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.items():
CONF.set_override(k, v, group)
class EmptyPolicyFixture(fixtures.Fixture):
"""Override the policy with an empty policy file.
This overrides the policy with a completely fake and synthetic
policy file.
"""
def setUp(self):
super(EmptyPolicyFixture, self).setUp()
self._prepare_policy()
policy_engine.POLICIES = FakePolicy()
policy_engine.reset()
policy_engine.init()
self.addCleanup(policy_engine.reset)
def _prepare_policy(self):
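        """Write an empty policy.yaml into a temp dir and point oslo.policy at it."""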
policy_dir = self.useFixture(fixtures.TempDir())
policy_file = os.path.join(policy_dir.path, 'policy.yaml')
policy_rules = jsonutils.loads('{}')
self.add_missing_default_rules(policy_rules)
with open(policy_file, 'w') as f:
jsonutils.dump(policy_rules, f)
BaseTestCase.conf_override(policy_file=policy_file,
group='oslo_policy')
BaseTestCase.conf_override(policy_dirs=[], group='oslo_policy')
def add_missing_default_rules(self, rules):
policies = FakePolicy()
for rule in policies.list_rules():
if rule.name not in rules:
rules[rule.name] = rule.check_str
|
stackforge/monasca-api
|
monasca_api/tests/policy/base.py
|
Python
|
apache-2.0
| 3,063
|
# -*- Python -*-
import os
import platform
import re
import subprocess
import tempfile
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# Configuration file for the 'lit' integration test runner.
# name: The name of this integration test suite.
config.name = 'MLIR_INTEGRATION'
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as integration test files.
config.suffixes = ['.mlir']
# test_source_root: The root path where integration tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where integration tests should be run.
config.test_exec_root = os.path.join(config.mlir_obj_root, 'integration_test')
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
config.substitutions.append(('%mlir_src_root', config.mlir_src_root))
llvm_config.with_system_environment(['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
llvm_config.use_default_substitutions()
# excludes: A list of directories to exclude from the integration testsuite.
config.excludes = ['CMakeLists.txt', 'README.txt', 'LICENSE.txt']
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = [config.mlir_tools_dir, config.llvm_tools_dir]
tools = [
'mlir-opt',
'mlir-cpu-runner',
]
# The following tools are optional.
tools.extend([
ToolSubst(
'%mlir_integration_test_dir',
config.mlir_integration_test_dir,
unresolved='ignore'),
])
llvm_config.add_tool_substitutions(tools, tool_dirs)
|
google/llvm-propeller
|
mlir/integration_test/lit.cfg.py
|
Python
|
apache-2.0
| 1,725
|
# Copyright 2012 Twitter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import calendar
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.models import User
from django.contrib.admin.views.decorators import staff_member_required
from accounts.views import _backend_hackend
from stats.interface import get_top_recent_apps
@staff_member_required
def metrics(request):
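    """Render daily counts of user signups, apps and A/B-testing signups."""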
TMPL = """
SELECT
date_trunc('day', %(date)s),
COUNT(1)
FROM %(table)s
GROUP BY date_trunc('day', %(date)s)
ORDER BY date_trunc('day', %(date)s) ASC
"""
def convert(lst):
return [(calendar.timegm(i[0].timetuple()), i[1]) for i in lst]
from django.db import connection
def execute(sql):
cursor = connection.cursor()
cursor.execute(sql)
return cursor.fetchall()
users = convert(execute(TMPL % {
'table': 'auth_user',
'date': 'date_joined',
}))
apps = convert(execute(TMPL % {
'table': 'dashboard_app',
'date': 'date_created',
}))
ab_signups = convert(execute(TMPL % {
'table': 'accounts_abtestingregistration',
'date': 'date_created',
}))
top_apps = get_top_recent_apps()
context = {
'title': 'Metrics',
'users': users,
'apps': apps,
'top_apps': top_apps,
'ab_signups': ab_signups,
}
return render_to_response('admin/metrics.html', context,
context_instance=RequestContext(request))
@staff_member_required
def admin_login(request, username):
user = get_object_or_404(User, username__iexact=username)
_backend_hackend(request, user)
return HttpResponseRedirect('/')
|
clutchio/clutch
|
admin_ext/views.py
|
Python
|
apache-2.0
| 2,305
|
import unittest
from robot.parsing.model import TestCase, TestCaseFile
from robot.utils.asserts import assert_equals
from robotide.controller.commands import ChangeTag
from robotide.controller.filecontrollers import TestCaseFileController
from robotide.controller.macrocontrollers import TestCaseController
from robotide.controller.tablecontrollers import TestCaseTableController
from robotide.controller.tags import Tag
from robotide.controller.ui.treecontroller import TreeController, _History, \
TestSelectionController
class ActionRegistererMock(object):
def register_actions(self, action_collections):
self.action_collections = action_collections
def register_action(self, action):
pass
class TestTreeController(unittest.TestCase):
def test_register_tree_actions(self):
mocked_ar = ActionRegistererMock()
TreeController(None, mocked_ar, None, None).register_tree_actions()
self.assertEquals(
["Go &Back", "Go &Forward"],
[a.name for a in mocked_ar.action_collections])
class _BaseTreeControllerTest(object):
def setUp(self):
self.history = _History()
self.controller = TreeController(
self._tree_mock(), None, None, None, history=self.history)
self.controller.add_to_history("Top Suite")
def _tree_mock(self):
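        # Minimal stand-in for the tree widget: a bare lambda used as an
        # object whose SelectItem calls are recorded in self._tree_mock_items.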
tree_mock = lambda: 0
self._tree_mock_items = []
tree_mock.SelectItem = lambda i: self._tree_mock_items.append(i)
return tree_mock
def _select_node(self, value):
self.controller.add_to_history(value)
def _go_back_and_return_selection(self):
self.controller.OnGoBack(None)
return self._tree_mock_items[-1]
def _go_forward_and_return_selection(self):
self.controller.OnGoForward(None)
return self._tree_mock_items[-1]
class TestNavigationHistory(_BaseTreeControllerTest, unittest.TestCase):
def test_go_back_one_level(self):
self._select_node('Top Suite Fake UK 2')
self.assertEquals('Top Suite', self._go_back_and_return_selection())
def test_go_back_two_levels(self):
nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
for name in nodes:
self._select_node(name)
nodes.reverse()
for name in nodes[1:]:
self.assertEquals(name, self._go_back_and_return_selection())
def test_it_is_not_possible_to_go_back_farther_than_history(self):
nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
for name in nodes:
self._select_node(name)
nodes.reverse()
for name in nodes[1:] + ['Top Suite']:
self._go_back_and_assert_selection(name)
self._go_back_and_assert_selection('Top Suite')
def test_go_back_with_selecting_in_between(self):
nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
for name in nodes:
self._select_node(name)
self._go_back_and_assert_selection('Sub Suite 1')
self._select_node('Sub Suite 2 Fake UK 0')
self._go_back_and_assert_selection('Sub Suite 1')
def test_go_forward(self):
nodes = ['Top Suite Fake UK 1', 'Sub Suite 1', 'Sub Suite 1 Fake UK 0']
for name in nodes:
self._select_node(name)
for _ in range(3):
self.controller.OnGoBack(None)
for name in nodes:
self._go_forward_and_assert_selection(name)
def test_go_back_and_forward_between_suite_and_resource(self):
nodes = ['Top Suite Fake UK 0', 'Resource Keyword',
'Sub Suite 0 Fake UK 2']
for name in nodes:
self._select_node(name)
self._go_back_and_assert_selection('Resource Keyword')
self._go_back_and_assert_selection('Top Suite Fake UK 0')
self._go_forward_and_assert_selection('Resource Keyword')
self._go_forward_and_assert_selection('Sub Suite 0 Fake UK 2')
def _go_back_and_assert_selection(self, expected_selection):
assert_equals(self._go_back_and_return_selection(), expected_selection)
def _go_forward_and_assert_selection(self, expected_selection):
assert_equals(
self._go_forward_and_return_selection(), expected_selection)
class TestTestSelectionController(unittest.TestCase):
def setUp(self):
self._tsc = TestSelectionController()
def test_test_selection_is_empty_by_default(self):
self.assertTrue(self._tsc.is_empty())
def test_test_selection_is_not_empty_when_it_contains_a_test(self):
self._tsc.select(self._create_test())
self.assertFalse(self._tsc.is_empty())
def test_test_selection_is_empty_after_removing_same_test_from_there_even_when_it_is_not_the_same_object(self):
self._tsc.select(self._create_test())
self._tsc.select(self._create_test(), False)
self.assertTrue(self._tsc.is_empty())
def test_adding_tag_to_selected_tests(self):
tests = [self._create_test('test%d' % i) for i in range(10)]
for t in tests:
self._tsc.select(t)
self._tsc.add_tag('foo')
for t in tests:
self.assertEqual([tag.name for tag in t.tags], ['foo'])
def test_adding_a_tag_to_test_with_a_default_tag(self):
test = self._create_test()
test.datafile_controller.default_tags.execute(
ChangeTag(Tag(None), 'default'))
assert_equals([t.name for t in test.tags], ['default'])
self._tsc.select(test)
self._tsc.add_tag('custom')
self.assertEqual([t.name for t in test.tags], ['default', 'custom'])
def _create_test(self, name='test'):
suite = TestCaseFile(source='suite')
suite_controller = TestCaseFileController(suite)
parent = TestCaseTableController(
suite_controller, suite.testcase_table)
test = TestCase(parent=lambda: 0, name=name)
return TestCaseController(parent, test)
|
caio2k/RIDE
|
utest/controller/ui/test_treecontroller.py
|
Python
|
apache-2.0
| 6,000
|
from __future__ import print_function
import os
import sys
import warnings
sys.path.insert(1, os.path.join("..","..","..",".."))
import h2o
from h2o.estimators import H2OTargetEncoderEstimator
from h2o.exceptions import H2ODeprecationWarning
from h2o.utils.metaclass import fullname
from tests import pyunit_utils as pu
seed = 42
te_init_name = fullname(H2OTargetEncoderEstimator.__init__)
def load_dataset(incl_test=False, incl_foldc=False):
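    """Load the expanded Titanic frame; optionally split off a 20% test set
    and add a 3-fold column."""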
fr = h2o.import_file(pu.locate("smalldata/titanic/titanic_expanded.csv"), header=1)
target = "survived"
train = fr
test = None
if incl_test:
fr = fr.split_frame(ratios=[.8], destination_frames=["titanic_train", "titanic_test"], seed=seed)
train = fr[0]
test = fr[1]
if incl_foldc:
train["foldc"] = train.kfold_column(3, seed)
return pu.ns(train=train, test=test, target=target)
def test_deprecated_k_param_is_alias_for_inflection_point():
ds = load_dataset(incl_test=True)
te = H2OTargetEncoderEstimator(noise=0)
te.train(y=ds.target, training_frame=ds.train)
encoded = te.predict(ds.test)
# print(encoded)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
te_k = H2OTargetEncoderEstimator(noise=0, k=5, blending=True)
assert len(w) == 1
assert issubclass(w[0].category, H2ODeprecationWarning)
assert "``k`` param of ``{}`` is deprecated".format(te_init_name) in str(w[0].message)
te_k.train(y=ds.target, training_frame=ds.train)
encoded_k = te_k.predict(ds.test)
# print(encoded_k)
te_ip = H2OTargetEncoderEstimator(noise=0, inflection_point=5, blending=True)
te_ip.train(y=ds.target, training_frame=ds.train)
encoded_ip = te_ip.predict(ds.test)
# print(encoded_ip)
try:
pu.compare_frames(encoded_k, encoded, 0, tol_numeric=1e-5)
assert False, "should have raised"
except AssertionError as ae:
assert "should have raised" not in str(ae)
assert pu.compare_frames(encoded_k, encoded_ip, 0, tol_numeric=1e-5)
def test_deprecated_f_param_is_alias_for_smoothing():
ds = load_dataset(incl_test=True)
te = H2OTargetEncoderEstimator(noise=0)
te.train(y=ds.target, training_frame=ds.train)
encoded = te.predict(ds.test)
# print(encoded)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
te_f = H2OTargetEncoderEstimator(noise=0, f=25, blending=True)
assert len(w) == 1
assert issubclass(w[0].category, H2ODeprecationWarning)
assert "``f`` param of ``{}`` is deprecated".format(te_init_name) in str(w[0].message)
te_f.train(y=ds.target, training_frame=ds.train)
encoded_f = te_f.predict(ds.test)
# print(encoded_f)
te_s = H2OTargetEncoderEstimator(noise=0, smoothing=25, blending=True)
te_s.train(y=ds.target, training_frame=ds.train)
encoded_s = te_s.predict(ds.test)
# print(encoded_s)
try:
pu.compare_frames(encoded_f, encoded, 0, tol_numeric=1e-5)
assert False, "should have raised"
except AssertionError as ae:
assert "should have raised" not in str(ae)
assert pu.compare_frames(encoded_f, encoded_s, 0, tol_numeric=1e-5)
def test_deprecated_noise_level_param_is_alias_for_noise():
ds = load_dataset(incl_test=True)
te = H2OTargetEncoderEstimator()
te.train(y=ds.target, training_frame=ds.train)
encoded = te.predict(ds.test)
# print(encoded)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
te_nl = H2OTargetEncoderEstimator(noise_level=0)
assert len(w) == 1
assert issubclass(w[0].category, H2ODeprecationWarning)
assert "``noise_level`` param of ``{}`` is deprecated".format(te_init_name) in str(w[0].message)
te_nl.train(y=ds.target, training_frame=ds.train)
encoded_nl = te_nl.predict(ds.test)
# print(encoded_nl)
te_n = H2OTargetEncoderEstimator(noise=0)
te_n.train(y=ds.target, training_frame=ds.train)
encoded_n = te_n.predict(ds.test)
# print(encoded_n)
try:
pu.compare_frames(encoded_nl, encoded, 0, tol_numeric=1e-5)
assert False, "should have raised"
except AssertionError as ae:
assert "should have raised" not in str(ae)
assert pu.compare_frames(encoded_nl, encoded_n, 0, tol_numeric=1e-5)
def test_transform_seed_param_raise_warning():
ds = load_dataset(incl_test=True)
te = H2OTargetEncoderEstimator(seed=42)
te.train(y=ds.target, training_frame=ds.train)
encoded = te.predict(ds.test)
transformed_1 = te.transform(ds.test)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
transformed_2 = te.transform(ds.test, seed=24)
assert len(w) == 1
assert issubclass(w[0].category, H2ODeprecationWarning)
assert "`seed` is deprecated in `transform` method and will be ignored" in str(w[0].message)
assert pu.compare_frames(encoded, transformed_1, 0, tol_numeric=1e-5)
assert pu.compare_frames(encoded, transformed_2, 0, tol_numeric=1e-5)
def test_transform_data_leakage_handling_param_raise_warning():
ds = load_dataset(incl_test=True)
te = H2OTargetEncoderEstimator(data_leakage_handling="leave_one_out", seed=42)
te.train(y=ds.target, training_frame=ds.train)
encoded = te.predict(ds.test)
encoded_as_training = te.transform(ds.test, as_training=True)
transformed_1 = te.transform(ds.test)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
transformed_2 = te.transform(ds.test, data_leakage_handling="none")
assert len(w) == 1
assert issubclass(w[0].category, H2ODeprecationWarning)
assert "`data_leakage_handling` is deprecated in `transform` method and will be ignored" in str(w[0].message)
# if data_leakage_handling is specified and not "none", this is interpreted as `as_training=True`
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
transformed_3 = te.transform(ds.test, data_leakage_handling="leave_one_out")
assert len(w) == 2
assert issubclass(w[1].category, H2ODeprecationWarning)
assert "as_training=True" in str(w[1].message)
assert pu.compare_frames(encoded, transformed_1, 0, tol_numeric=1e-5)
assert pu.compare_frames(encoded, transformed_2, 0, tol_numeric=1e-5)
assert pu.compare_frames(encoded_as_training, transformed_3, 0, tol_numeric=1e-5)
try:
pu.compare_frames(encoded, transformed_3, 0, tol_numeric=1e-5)
assert False, "should have raised"
except AssertionError as ae:
assert "should have raised" not in str(ae)
pu.run_tests([
test_deprecated_k_param_is_alias_for_inflection_point,
test_deprecated_f_param_is_alias_for_smoothing,
test_deprecated_noise_level_param_is_alias_for_noise,
test_transform_seed_param_raise_warning,
test_transform_data_leakage_handling_param_raise_warning,
])
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/targetencoder/pyunit_te_deprecated_params.py
|
Python
|
apache-2.0
| 7,090
|
#!/usr/bin/env python
"""Tests the mysql data store."""
import unittest
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import mysql_advanced_data_store
class MysqlAdvancedTestMixin(object):
def InitDatastore(self):
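    """Point the data store at a per-test MySQL database; skip if unreachable."""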
self.token = access_control.ACLToken(username="test",
reason="Running tests")
# Use separate tables for benchmarks / tests so they can be run in parallel.
config_lib.CONFIG.Set("Mysql.database_name", "grr_test_%s" %
self.__class__.__name__)
try:
data_store.DB = mysql_advanced_data_store.MySQLAdvancedDataStore()
data_store.DB.security_manager = test_lib.MockSecurityManager()
data_store.DB.RecreateTables()
except Exception as e:
logging.debug("Error while connecting to MySQL db: %s.", e)
raise unittest.SkipTest("Skipping since Mysql db is not reachable.")
def DestroyDatastore(self):
data_store.DB.DropTables()
def testCorrectDataStore(self):
self.assertTrue(
isinstance(data_store.DB,
mysql_advanced_data_store.MySQLAdvancedDataStore))
class MysqlAdvancedDataStoreTest(
MysqlAdvancedTestMixin, data_store_test._DataStoreTest):
"""Test the mysql data store abstraction."""
class MysqlAdvancedDataStoreBenchmarks(
MysqlAdvancedTestMixin, data_store_test.DataStoreBenchmarks):
"""Benchmark the mysql data store abstraction."""
class MysqlAdvancedDataStoreCSVBenchmarks(
MysqlAdvancedTestMixin, data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the mysql data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
ksmaheshkumar/grr
|
lib/data_stores/mysql_advanced_data_store_test.py
|
Python
|
apache-2.0
| 2,003
|
# The content of this file was generated using the Python profile of libCellML 0.2.0.
from enum import Enum
from math import *
__version__ = "0.3.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 3
VARIABLE_COUNT = 19
class VariableType(Enum):
VARIABLE_OF_INTEGRATION = 1
STATE = 2
CONSTANT = 3
COMPUTED_CONSTANT = 4
ALGEBRAIC = 5
EXTERNAL = 6
VOI_INFO = {"name": "time", "units": "millisecond", "component": "environment", "type": VariableType.VARIABLE_OF_INTEGRATION}
STATE_INFO = [
{"name": "m", "units": "dimensionless", "component": "sodium_channel_m_gate", "type": VariableType.STATE},
{"name": "h", "units": "dimensionless", "component": "sodium_channel_h_gate", "type": VariableType.STATE},
{"name": "n", "units": "dimensionless", "component": "potassium_channel_n_gate", "type": VariableType.STATE}
]
VARIABLE_INFO = [
{"name": "V", "units": "millivolt", "component": "membrane", "type": VariableType.EXTERNAL},
{"name": "g_L", "units": "milliS_per_cm2", "component": "leakage_current", "type": VariableType.CONSTANT},
{"name": "Cm", "units": "microF_per_cm2", "component": "membrane", "type": VariableType.CONSTANT},
{"name": "E_R", "units": "millivolt", "component": "membrane", "type": VariableType.CONSTANT},
{"name": "g_K", "units": "milliS_per_cm2", "component": "potassium_channel", "type": VariableType.CONSTANT},
{"name": "g_Na", "units": "milliS_per_cm2", "component": "sodium_channel", "type": VariableType.CONSTANT},
{"name": "i_Stim", "units": "microA_per_cm2", "component": "membrane", "type": VariableType.ALGEBRAIC},
{"name": "E_L", "units": "millivolt", "component": "leakage_current", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_L", "units": "microA_per_cm2", "component": "leakage_current", "type": VariableType.ALGEBRAIC},
{"name": "E_Na", "units": "millivolt", "component": "sodium_channel", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_Na", "units": "microA_per_cm2", "component": "sodium_channel", "type": VariableType.EXTERNAL},
{"name": "alpha_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
{"name": "alpha_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
{"name": "E_K", "units": "millivolt", "component": "potassium_channel", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_K", "units": "microA_per_cm2", "component": "potassium_channel", "type": VariableType.ALGEBRAIC},
{"name": "alpha_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.EXTERNAL},
{"name": "beta_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.ALGEBRAIC}
]
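# Variables marked EXTERNAL above (V, i_Na, alpha_n) are supplied at run time
# through the external_variable(voi, states, variables, index) callback passed
# to the initialise/compute functions below.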
def leq_func(x, y):
return 1.0 if x <= y else 0.0
def geq_func(x, y):
return 1.0 if x >= y else 0.0
def and_func(x, y):
return 1.0 if bool(x) & bool(y) else 0.0
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialise_variables(voi, states, variables, external_variable):
variables[1] = 0.3
variables[2] = 1.0
variables[3] = 0.0
variables[4] = 36.0
variables[5] = 120.0
states[0] = 0.05
states[1] = 0.6
states[2] = 0.325
variables[0] = external_variable(voi, states, variables, 0)
variables[17] = external_variable(voi, states, variables, 17)
variables[10] = external_variable(voi, states, variables, 10)
def compute_computed_constants(variables):
variables[7] = variables[3]-10.613
variables[9] = variables[3]-115.0
variables[15] = variables[3]+12.0
def compute_rates(voi, states, rates, variables, external_variable):
variables[0] = external_variable(voi, states, variables, 0)
variables[11] = 0.1*(variables[0]+25.0)/(exp((variables[0]+25.0)/10.0)-1.0)
variables[12] = 4.0*exp(variables[0]/18.0)
rates[0] = variables[11]*(1.0-states[0])-variables[12]*states[0]
variables[13] = 0.07*exp(variables[0]/20.0)
variables[14] = 1.0/(exp((variables[0]+30.0)/10.0)+1.0)
rates[1] = variables[13]*(1.0-states[1])-variables[14]*states[1]
variables[17] = external_variable(voi, states, variables, 17)
variables[18] = 0.125*exp(variables[0]/80.0)
rates[2] = variables[17]*(1.0-states[2])-variables[18]*states[2]
def compute_variables(voi, states, rates, variables, external_variable):
variables[0] = external_variable(voi, states, variables, 0)
variables[6] = -20.0 if and_func(geq_func(voi, 10.0), leq_func(voi, 10.5)) else 0.0
variables[8] = variables[1]*(variables[0]-variables[7])
variables[17] = external_variable(voi, states, variables, 17)
variables[10] = external_variable(voi, states, variables, 10)
variables[16] = variables[4]*pow(states[2], 4.0)*(variables[0]-variables[15])
|
cellml/libcellml
|
tests/resources/generator/hodgkin_huxley_squid_axon_model_1952/model.external.py
|
Python
|
apache-2.0
| 5,138
|
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from oslo_config import cfg
from solum.api.controllers.v1 import trigger
from solum.tests import base
from solum.tests import fakes
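# The stacked class-level patches below are handed to each test method in
# bottom-up order: app handler, response, then request.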
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.controllers.v1.trigger.app_handler'
'.AppHandler')
class TestTriggerController(base.BaseTestCase):
def test_trigger_get_workflow_with_empty_body(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
workflow = obj._get_workflow({})
self.assertIsNone(workflow)
def test_trigger_get_workflow_with_deploy(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'deploy'}
workflow = obj._get_workflow(query)
self.assertEqual(['deploy'], list(workflow))
def test_trigger_get_workflow_with_build_deploy(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'build+deploy'}
workflow = obj._get_workflow(query)
self.assertEqual(['build', 'deploy'], list(workflow))
def test_trigger_get_workflow_with_all(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'unittest+build+deploy'}
workflow = obj._get_workflow(query)
self.assertEqual(['unittest', 'build', 'deploy'], list(workflow))
def test_trigger_get_workflow_with_invalid_stage(self, assem_mock,
resp_mock, request_mock):
obj = trigger.TriggerController()
query = {'workflow': 'unittest+unitunitunittest'}
workflow = obj._get_workflow(query)
self.assertEqual(['unittest'], list(workflow))
def test_trigger_process_request_private_repo(self, assem_mock,
resp_mock, request_mock):
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'comment': {'commit_id': 'asdf',
'body': ' SOLUM retry tests ',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': True}}
obj = trigger.TriggerController()
commit_sha, collab_url = obj._process_request(body_dict)
self.assertIsNone(collab_url)
self.assertEqual('asdf', commit_sha)
def test_trigger_process_request_on_valid_pub_repo(self,
assem_mock, resp_mock,
request_mock):
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'comment': {'commit_id': 'asdf',
'body': 'solum retry tests',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': False}}
obj = trigger.TriggerController()
commit_sha, collab_url = obj._process_request(body_dict)
self.assertEqual('https://api.github.com/repos/u/r/collaborators/u',
collab_url)
self.assertEqual('asdf', commit_sha)
@mock.patch('solum.common.policy.check')
def test_trigger_post_with_empty_body(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(400, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_github_webhook(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
body_dict = {'sender': {'url': 'https://api.github.com'},
'action': 'opened',
'pull_request': {'head': {'sha': 'asdf'}},
'repository': {'statuses_url': status_url}}
expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(202, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
tw.assert_called_once_with('test_id', 'asdf', expected_st_url, None,
workflow=None)
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_github_comment_webhook(self, mock_policy,
assem_mock, resp_mock,
request_mock):
mock_policy.return_value = True
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'action': 'created',
'comment': {'commit_id': 'asdf',
'body': ' SOLUM retry tests ',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': True}}
expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(202, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
tw.assert_called_once_with('test_id', 'asdf', expected_st_url, None,
workflow=None)
@mock.patch('httplib2.Http.request')
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_mismatch_comment_pub_repo(self, http_mock,
mock_policy,
assem_mock, resp_mock,
request_mock):
mock_policy.return_value = True
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'action': 'created',
'comment': {'commit_id': 'asdf',
'body': 'solum is awesome',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': False}}
request_mock.body = json.dumps(body_dict)
http_mock.return_value = ({'status': '204'}, '') # a collaborator
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(403, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('httplib2.Http.request')
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_valid_comment_pub_repo(self, http_mock,
mock_policy,
assem_mock, resp_mock,
request_mock):
mock_policy.return_value = True
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'action': 'created',
'comment': {'commit_id': 'asdf',
'body': 'solum retry tests',
'user': {'login': 'u'}},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': False}}
expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
expected_clb_url = 'https://api.github.com/repos/u/r/collaborators/u'
request_mock.body = json.dumps(body_dict)
http_mock.return_value = ({'status': '204'}, '') # Valid collaborator
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(202, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
tw.assert_called_once_with('test_id', 'asdf', expected_st_url,
expected_clb_url, workflow=None)
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_comment_missing_login(self, mock_policy,
assem_mock, resp_mock,
request_mock):
mock_policy.return_value = True
cfg.CONF.api.rebuild_phrase = "solum retry tests"
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
collab_url = ('https://api.github.com/repos/u/r/' +
'collaborators{/collaborator}')
body_dict = {'sender': {'url': 'https://api.github.com'},
'comment': {'commit_id': 'asdf',
'body': 'solum retry tests',
'user': 'MISSING_LOGIN'},
'repository': {'statuses_url': status_url,
'collaborators_url': collab_url,
'private': False}}
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(400, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_wrong_github_webhook(self, mock_policy,
assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
body_dict = {'sender': {'url': 'https://api.github.com'},
'pull_request': {'head': {'sha': 'asdf'}},
'repository': {'HACKED_statuses_url': status_url}}
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(400, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_unknown_git_webhook(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
body_dict = {"pull_request": {"head": {"sha": "asdf"}}}
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(501, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_non_github_webhook(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
body_dict = {"sender": {"url": "https://non-github.com"},
"pull_request": {"head": {"sha": "asdf"}}}
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(501, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
@mock.patch('solum.common.policy.check')
def test_trigger_post_on_github_ping_webhook(self, mock_policy, assem_mock,
resp_mock, request_mock):
mock_policy.return_value = True
body_dict = {"sender": {"url": "https://api.github.com"},
"zen": "Keep it logically awesome."}
request_mock.body = json.dumps(body_dict)
obj = trigger.TriggerController()
obj.post('test_id')
self.assertEqual(501, resp_mock.status)
tw = assem_mock.return_value.trigger_workflow
assert not tw.called
|
stackforge/solum
|
solum/tests/api/controllers/v1/test_trigger.py
|
Python
|
apache-2.0
| 14,612
|
#!/usr/bin/env python
"""Test the flow archive."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
import mock
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server.flows.general import transfer as flows_transfer
from grr_response_server.gui import api_call_router_with_approval_checks
from grr_response_server.gui import archive_generator
from grr_response_server.gui import gui_test_lib
from grr_response_server.gui.api_plugins import flow as api_flow
from grr_response_server.output_plugins import csv_plugin
from grr_response_server.output_plugins import sqlite_plugin
from grr_response_server.output_plugins import yaml_plugin
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class TestFlowArchive(gui_test_lib.GRRSeleniumTest):
def setUp(self):
super(TestFlowArchive, self).setUp()
self.client_id = self.SetupClient(0)
self.RequestAndGrantClientApproval(self.client_id)
self.action_mock = action_mocks.FileFinderClientMock()
def testDoesNotShowGenerateArchiveButtonForNonExportableRDFValues(self):
flow_test_lib.TestFlowHelper(
gui_test_lib.FlowWithOneNetworkConnectionResult.__name__,
self.action_mock,
client_id=self.client_id,
token=self.token)
self.Open("/#/clients/%s" % self.client_id)
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneNetworkConnectionResult')")
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent, "42")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testDoesNotShowGenerateArchiveButtonWhenResultCollectionIsEmpty(self):
flow_test_lib.TestFlowHelper(
gui_test_lib.RecursiveTestFlow.__name__,
self.action_mock,
client_id=self.client_id,
token=self.token)
self.Open("/#/clients/%s" % self.client_id)
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('RecursiveTestFlow')")
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent, "Value")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testShowsGenerateArchiveButtonForGetFileFlow(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
flow_test_lib.TestFlowHelper(
flows_transfer.GetFile.__name__,
self.action_mock,
client_id=self.client_id,
pathspec=pathspec,
token=self.token)
self.Open("/#/clients/%s" % self.client_id)
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('GetFile')")
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testGenerateArchiveButtonGetsDisabledAfterClick(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
flow_test_lib.TestFlowHelper(
flows_transfer.GetFile.__name__,
self.action_mock,
client_id=self.client_id,
pathspec=pathspec,
token=self.token)
self.Open("/#/clients/%s" % self.client_id)
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('GetFile')")
self.Click("link=Results")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsElementPresent, "css=button.DownloadButton[disabled]")
self.WaitUntil(self.IsTextPresent, "Generation has started")
def testShowsErrorMessageIfArchiveStreamingFailsBeforeFirstChunkIsSent(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
flow_id = flow_test_lib.TestFlowHelper(
flows_transfer.GetFile.__name__,
self.action_mock,
client_id=self.client_id,
check_flow_errors=False,
pathspec=pathspec,
token=self.token)
def RaisingStub(*unused_args, **unused_kwargs):
raise RuntimeError("something went wrong")
with utils.Stubber(archive_generator.CollectionArchiveGenerator, "Generate",
RaisingStub):
self.Open("/#/clients/%s" % self.client_id)
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('GetFile')")
self.Click("link=Results")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent,
"Can't generate archive: Unknown error")
self.WaitUntil(self.IsUserNotificationPresent,
"Archive generation failed for flow %s" % flow_id)
@mock.patch.object(
api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks,
"GetExportedFlowResults",
return_value=api_flow.ApiGetExportedFlowResultsHandler())
def testClickingOnDownloadAsCsvZipStartsDownload(self, mock_method):
self.checkClickingOnDownloadAsStartsDownloadForType(
mock_method, csv_plugin.CSVInstantOutputPlugin.plugin_name,
csv_plugin.CSVInstantOutputPlugin.friendly_name)
@mock.patch.object(
api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks,
"GetExportedFlowResults",
return_value=api_flow.ApiGetExportedFlowResultsHandler())
def testClickingOnDownloadAsYamlZipStartsDownload(self, mock_method):
self.checkClickingOnDownloadAsStartsDownloadForType(
mock_method,
yaml_plugin.YamlInstantOutputPluginWithExportConversion.plugin_name,
yaml_plugin.YamlInstantOutputPluginWithExportConversion.friendly_name)
@mock.patch.object(
api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks,
"GetExportedFlowResults",
return_value=api_flow.ApiGetExportedFlowResultsHandler())
def testClickingOnDownloadAsSqliteZipStartsDownload(self, mock_method):
self.checkClickingOnDownloadAsStartsDownloadForType(
mock_method, sqlite_plugin.SqliteInstantOutputPlugin.plugin_name,
sqlite_plugin.SqliteInstantOutputPlugin.friendly_name)
def checkClickingOnDownloadAsStartsDownloadForType(self, mock_method, plugin,
plugin_display_name):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
session_id = flow_test_lib.TestFlowHelper(
flows_transfer.GetFile.__name__,
pathspec=pathspec,
client_mock=self.action_mock,
client_id=self.client_id,
token=self.token)
self.Open("/#/clients/%s/flows/%s" % (self.client_id, session_id))
self.Click("link=Results")
self.Select("id=plugin-select", plugin_display_name)
self.Click("css=grr-download-collection-as button[name='download-as']")
def MockMethodIsCalled():
try:
# Mock should be called twice: once for HEAD (to check permissions)
# and once for GET methods.
mock_method.assert_called_with(
api_flow.ApiGetExportedFlowResultsArgs(
client_id=self.client_id,
flow_id=session_id,
plugin_name=plugin),
token=mock.ANY)
return True
except AssertionError:
return False
self.WaitUntil(MockMethodIsCalled)
def testDoesNotShowDownloadAsPanelIfCollectionIsEmpty(self):
session_id = flow_test_lib.TestFlowHelper(
gui_test_lib.RecursiveTestFlow.__name__,
client_mock=self.action_mock,
client_id=self.client_id,
token=self.token)
self.Open("/#/clients/%s/flows/%s" % (self.client_id, session_id))
self.Click("link=Results")
self.WaitUntil(self.IsTextPresent, "Value")
self.WaitUntilNot(self.IsElementPresent, "grr-download-collection-as")
if __name__ == "__main__":
app.run(test_lib.main)
|
dunkhong/grr
|
grr/server/grr_response_server/gui/selenium_tests/flow_archive_test.py
|
Python
|
apache-2.0
| 8,184
|
"""Commands related to networks are in this module"""
import click
import sys
from hil.cli.client_setup import client
@click.group()
def network():
"""Commands related to network"""
@network.command(name='create', short_help='Create a new network')
@click.argument('network')
@click.argument('owner')
@click.option('--access', help='Projects that can access this network. '
'Defaults to the owner of the network')
@click.option('--net-id',
help='Network ID for network. Only admins can specify this.')
def network_create(network, owner, access, net_id):
"""Create a link-layer <network>. See docs/networks.md for details"""
if net_id is None:
net_id = ''
if access is None:
access = owner
client.network.create(network, owner, access, net_id)
@network.command(name='delete')
@click.argument('network')
def network_delete(network):
"""Delete a network"""
client.network.delete(network)
@network.command(name='show')
@click.argument('network')
def network_show(network):
"""Display information about network"""
q = client.network.show(network)
for item in q.items():
sys.stdout.write("%s\t : %s\n" % (item[0], item[1]))
@network.command(name='list')
def network_list():
"""List all networks"""
q = client.network.list()
for item in q.items():
sys.stdout.write('%s \t : %s\n' % (item[0], item[1]))
@network.command('list-attachments')
@click.argument('network')
@click.option('--project', help='Name of project.')
def list_network_attachments(network, project):
"""Lists all the attachments from <project> for <network>
If <project> is `None`, lists all attachments for <network>
"""
print client.network.list_network_attachments(network, project)
@network.command(name='grant-access')
@click.argument('network')
@click.argument('project')
def network_grant_project_access(project, network):
"""Add <project> to <network> access"""
client.network.grant_access(project, network)
@network.command(name='revoke-access')
@click.argument('network')
@click.argument('project')
def network_revoke_project_access(project, network):
"""Remove <project> from <network> access"""
client.network.revoke_access(project, network)
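# --- Illustrative usage sketch (not part of the original module) ---
# Shows one way to exercise the click group above without a real HIL
# server: click's CliRunner invokes subcommands in-process. The network
# and project names ("net0", "proj0") are made-up placeholders, and a
# configured `client` from hil.cli.client_setup is assumed.
if __name__ == '__main__':
    from click.testing import CliRunner
    runner = CliRunner()
    # Equivalent to the shell command: hil network create net0 proj0
    result = runner.invoke(network, ['create', 'net0', 'proj0'])
    print result.output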
|
SahilTikale/haas
|
hil/cli/network.py
|
Python
|
apache-2.0
| 2,277
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import gtk
from objects.gradientcolor import GradientColor
from objects.gradient import Gradient
from interfaces.signalizable import Signalizable
# _ is presumably installed into builtins by the application's gettext setup
# (an assumption); import the stdlib binding as a fallback so the __main__
# demo below also runs standalone.
from gettext import gettext as _
class GradientLine(gtk.Viewport):
def __init__(self, moving_callback=None, color_callback=None, gradient=None):
"""
moving_callback - callback function to be called when changing position of the selected color(for spin widget)
gradient - editable gradient
"""
gtk.Viewport.__init__(self)
self.set_size_request(-1, 70)
self.set_shadow_type(gtk.SHADOW_NONE)
self.width = 0
self.height = 0
self._motion = False
self.selected = -1
self.x = 0
self.move = False
self.gradient = gradient
self.gradient.change_size(0, 0, 1, 0)
self.moving_callback = moving_callback
self.color_callback = color_callback
self.layout = gtk.Layout()
self.add(self.layout)
self.layout.set_events(0)
self.layout.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.layout.connect("button-press-event", self.press)
self.layout.add_events(gtk.gdk.EXPOSURE_MASK)
self.layout.connect("expose-event", self.expose)
self.layout.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.layout.connect("button-release-event", self.release)
self.layout.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.layout.connect("motion-notify-event", self.motion)
self.layout.add_events(gtk.gdk.ENTER_NOTIFY_MASK)
self.layout.connect("enter-notify-event", self.enter)
self.layout.add_events(gtk.gdk.LEAVE_NOTIFY_MASK)
self.layout.connect("leave-notify-event", self.leave)
def update(self):
self.queue_draw()
def set_position_for_selected(self, x):
self.gradient.set_position(self.selected, x)
def set_color_for_selected(self, color):
color.position = self.gradient.colors[self.selected].position
self.gradient.set_color(self.selected, color)
def motion(self, widget, event):
self._motion = True
self.x = event.x
if self.move:
if self.selected >= 0:
if self.moving_callback:
self.moving_callback(event.x / self.width)
self.set_position_for_selected(event.x / self.width)
self.gradient.update()
self.queue_draw()
return True
def enter(self, widget, event):
return True
def leave(self, widget, event):
self._motion = False
self.x = event.x
self.queue_draw()
return True
def press(self, widget, event):
self.move = True
cnt = len(self.gradient.colors)
if cnt > 0:
for col in range(0, cnt):
if (self.gradient.colors[col].position > (event.x / self.width - 0.01)) and (
self.gradient.colors[col].position < (event.x / self.width + 0.01)):
self.selected = col
self.moving_callback(self.gradient.colors[col].position)
self.color_callback(self.gradient.colors[col])
break
else:
self.selected = -1
if self.selected == -1 or not cnt:
self.gradient.add_new_color(GradientColor(1, 1, 0.1, 1.0, event.x / self.width))
self.selected = len(self.gradient.colors)-1
self.moving_callback(self.gradient.colors[self.selected].position)
self.color_callback(self.gradient.colors[self.selected])
self.gradient.update()
self.queue_draw()
def release(self, widget, event):
self.move = False
self.queue_draw()
def expose(self, widget, event):
context = widget.bin_window.cairo_create()
self.width, self.height = widget.window.get_size()
context.save()
context.new_path()
#context.translate(0, 0)
if (self.width > 0) and (self.height > 0):
context.scale(self.width, self.height)
context.rectangle(0, 0, 1, 1)
context.set_source(self.gradient.gradient)
context.fill_preserve()
context.restore()
if self._motion and not self.move:
context.new_path()
dash = list()
context.set_dash(dash)
context.set_line_width(2)
context.move_to(self.x, 0)
context.line_to(self.x, 30)
context.move_to(self.x, self.height - 30)
context.line_to(self.x, self.height)
scol = sorted(self.gradient.colors,
key=lambda color: color.position) # better in __init__ and update when necessary
cnt = len(scol)
rx = self.x / self.width
index = 0
for col in scol:
if rx < col.position:
for c in range(0, cnt):
if self.gradient.colors[c].position == col.position:
index = c
break
break
r = self.gradient.colors[index].red
g = self.gradient.colors[index].green
b = self.gradient.colors[index].blue
l = 1 - (r + g + b) / 3.0
if l >= 0.5:
l = 1
else:
l = 0
r, g, b = l, l, l
context.set_source_rgba(r, g, b, 1.0)
context.stroke()
for color in range(len(self.gradient.colors)):
if color == self.selected:
delta = 10
else:
delta = 0
context.new_path()
pos = int(self.width * self.gradient.colors[color].position)
context.move_to(pos - 5, 0)
context.line_to(pos + 5, 0)
context.line_to(pos, 20)
context.line_to(pos - 5, 0)
context.set_source_rgb(self.gradient.colors[color].alpha, self.gradient.colors[color].alpha,
self.gradient.colors[color].alpha)
context.fill_preserve()
if delta:
context.move_to(pos, 20)
context.line_to(pos, 20 + delta)
context.set_source_rgb(0.44, 0.62, 0.81)
context.stroke()
class LinearGradientEditor(gtk.VBox, Signalizable):
def __init__(self):
gtk.VBox.__init__(self)
from canvas import Canvas
self.canvas = Canvas()
table = gtk.Table(4, 4, False)
self.pack_start(table)
self.combobox = gtk.combo_box_new_text()
table.attach(self.combobox, 1, 2, 0, 1, gtk.FILL, 0)
gradient = Gradient()
self.gl = GradientLine(self.moving_callback, self.color_callback, gradient)
table.attach(self.gl, 1, 2, 1, 2, gtk.FILL | gtk.EXPAND, 0)
new_color = gtk.Button()
image = gtk.Image()
image.set_from_stock(gtk.STOCK_NEW, gtk.ICON_SIZE_MENU)
new_color.add(image)
table.attach(new_color, 2, 3, 0, 1, 0, 0, 0)
button = gtk.Button()
image = gtk.Image()
image.set_from_stock(gtk.STOCK_GO_FORWARD, gtk.ICON_SIZE_MENU)
button.add(image)
button.connect("clicked", self.forward)
table.attach(button, 2, 3, 1, 2, 0, gtk.FILL, 0)
button = gtk.Button()
image = gtk.Image()
image.set_from_stock(gtk.STOCK_GO_BACK, gtk.ICON_SIZE_MENU)
button.add(image)
button.connect("clicked", self.back)
table.attach(button, 0, 1, 1, 2, 0, gtk.FILL, 0)
hbox = gtk.HBox()
label = gtk.Label(_("Color:"))
hbox.pack_start(label)
self.color_button = gtk.ColorButton()
self.color_button.set_use_alpha(True)
self.color_button.connect("color-set", self.set_gradient_color)
hbox.pack_start(self.color_button)
label = gtk.Label(_("Position:"))
hbox.pack_start(label)
self.sel_position = gtk.SpinButton(climb_rate=0.00001, digits=5)
self.sel_position.set_range(0.0, 1.0)
self.sel_position.set_wrap(True)
self.sel_position.set_increments(0.00001, 0.1)
self.sel_position.connect("value-changed", self.move_color)
hbox.pack_start(self.sel_position)
table.attach(hbox, 1, 2, 2, 3, gtk.FILL, 0, 0)
self.install_signal("update")
self.show_all()
def set_value(self, value):
self.gl.gradient = Gradient(string=str(value))
def forward(self, widget):
if self.gl:
if self.gl.selected < len(self.gl.gradient.colors) - 1:
self.gl.selected += 1
else:
self.gl.selected = -1
self.moving_callback(self.gl.gradient.colors[self.gl.selected].position)
self.update()
def back(self, widget):
if self.gl:
if self.gl.selected > -1:
self.gl.selected -= 1
else:
self.gl.selected = len(self.gl.gradient.colors) - 1
self.moving_callback(self.gl.gradient.colors[self.gl.selected].position)
self.update()
def moving_callback(self, x):
self.sel_position.set_value(x)
self.update()
def color_callback(self, color):
self.color_button.set_color(gtk.gdk.Color(float(color.red), float(color.green), float(color.blue)))
self.color_button.set_alpha(int(color.alpha * 65535))
self.update()
def move_color(self, widget):
if self.gl:
self.gl.set_position_for_selected(widget.get_value())
self.update()
def set_gradient_color(self, widget):
if self.gl:
col = GradientColor(widget.get_color().red_float, widget.get_color().green_float,
widget.get_color().blue_float, widget.get_alpha() / 65535.0,0)
self.gl.set_color_for_selected(col)
self.update()
def update(self):
self.gl.update()
self.emit("update", self)
#self.canvas.update()
if __name__ == '__main__':
horizontal_window = gtk.Window()
horizontal_window.set_default_size(500, 100)
horizontal_window.connect("delete-event", gtk.main_quit)
ge = LinearGradientEditor()
horizontal_window.add(ge)
horizontal_window.show_all()
gtk.main()
|
jaliste/sanaviron
|
sanaviron/ui/gradienteditor.py
|
Python
|
apache-2.0
| 10,331
|
import argparse
from dvc.command.base import CmdBase, append_doc_link, fix_subparsers
from dvc.command.config import CmdConfig
from dvc.compare import TabularData
from dvc.config import ConfigError
from dvc.exceptions import DvcException
from dvc.types import Dict, List
from dvc.ui import ui
from dvc.utils import format_link
class MachineDisabledError(ConfigError):
def __init__(self):
super().__init__("Machine feature is disabled")
class CmdMachineConfig(CmdConfig):
def __init__(self, args):
super().__init__(args)
if not self.config["feature"].get("machine", False):
raise MachineDisabledError
if getattr(self.args, "name", None):
self.args.name = self.args.name.lower()
def _check_exists(self, conf):
if self.args.name not in conf["machine"]:
raise ConfigError(f"machine '{self.args.name}' doesn't exist.")
class CmdMachineAdd(CmdMachineConfig):
def run(self):
from dvc.machine import validate_name
validate_name(self.args.name)
if self.args.default:
ui.write(f"Setting '{self.args.name}' as a default machine.")
with self.config.edit(self.args.level) as conf:
if self.args.name in conf["machine"] and not self.args.force:
raise ConfigError(
"machine '{}' already exists. Use `-f|--force` to "
"overwrite it.".format(self.args.name)
)
conf["machine"][self.args.name] = {"cloud": self.args.cloud}
if self.args.default:
conf["core"]["machine"] = self.args.name
return 0
class CmdMachineRemove(CmdMachineConfig):
def run(self):
with self.config.edit(self.args.level) as conf:
self._check_exists(conf)
del conf["machine"][self.args.name]
up_to_level = self.args.level or "repo"
# Remove core.machine refs to this machine in any shadowing configs
for level in reversed(self.config.LEVELS):
with self.config.edit(level) as conf:
if conf["core"].get("machine") == self.args.name:
del conf["core"]["machine"]
if level == up_to_level:
break
return 0
class CmdMachineList(CmdMachineConfig):
TABLE_COLUMNS = [
"name",
"cloud",
"region",
"image",
"spot",
"spot_price",
"instance_hdd_size",
"instance_type",
"ssh_private",
"startup_script",
]
PRIVATE_COLUMNS = ["ssh_private", "startup_script"]
def _hide_private(self, conf):
for machine in conf:
for column in self.PRIVATE_COLUMNS:
if column in conf[machine]:
conf[machine][column] = "***"
def _show_origin(self):
levels = [self.args.level] if self.args.level else self.config.LEVELS
for level in levels:
conf = self.config.read(level)["machine"]
if self.args.name:
conf = conf.get(self.args.name, {})
self._hide_private(conf)
prefix = self._config_file_prefix(True, self.config, level)
configs = list(self._format_config(conf, prefix))
if configs:
ui.write("\n".join(configs))
def _show_table(self):
td = TabularData(self.TABLE_COLUMNS, fill_value="-")
conf = self.config.read()["machine"]
if self.args.name:
conf = {self.args.name: conf.get(self.args.name, {})}
self._hide_private(conf)
for machine, machine_config in conf.items():
machine_config["name"] = machine
td.row_from_dict(machine_config)
td.dropna("cols", "all")
td.render()
def run(self):
if self.args.show_origin:
self._show_origin()
else:
self._show_table()
return 0
class CmdMachineModify(CmdMachineConfig):
def run(self):
from dvc.config import merge
with self.config.edit(self.args.level) as conf:
merged = self.config.load_config_to_level(self.args.level)
merge(merged, conf)
self._check_exists(merged)
if self.args.name not in conf["machine"]:
conf["machine"][self.args.name] = {}
section = conf["machine"][self.args.name]
if self.args.unset:
section.pop(self.args.option, None)
else:
section[self.args.option] = self.args.value
return 0
class CmdMachineRename(CmdBase):
def _check_exists(self, conf):
if self.args.name not in conf["machine"]:
raise ConfigError(f"machine '{self.args.name}' doesn't exist.")
def _rename_default(self, conf):
if conf["core"].get("machine") == self.args.name:
conf["core"]["machine"] = self.args.new
def _check_before_rename(self):
from dvc.machine import validate_name
validate_name(self.args.new)
all_config = self.config.load_config_to_level(None)
if self.args.new in all_config.get("machine", {}):
raise ConfigError(
"Rename failed. Machine '{}' already exists.".format(
self.args.new
)
)
ui.write(f"Rename machine '{self.args.name}' to '{self.args.new}'.")
def run(self):
self._check_before_rename()
with self.config.edit(self.args.level) as conf:
self._check_exists(conf)
conf["machine"][self.args.new] = conf["machine"][self.args.name]
try:
self.repo.machine.rename(self.args.name, self.args.new)
except DvcException as error:
del conf["machine"][self.args.new]
raise ConfigError("terraform rename failed") from error
del conf["machine"][self.args.name]
self._rename_default(conf)
up_to_level = self.args.level or "repo"
for level in reversed(self.config.LEVELS):
if level == up_to_level:
break
with self.config.edit(level) as level_conf:
self._rename_default(level_conf)
return 0
class CmdMachineDefault(CmdMachineConfig):
def run(self):
if self.args.name is None and not self.args.unset:
conf = self.config.read(self.args.level)
try:
print(conf["core"]["machine"])
except KeyError:
ui.write("No default machine set")
return 1
else:
with self.config.edit(self.args.level) as conf:
if self.args.unset:
conf["core"].pop("machine", None)
else:
merged_conf = self.config.load_config_to_level(
self.args.level
)
if (
self.args.name in conf["machine"]
or self.args.name in merged_conf["machine"]
):
conf["core"]["machine"] = self.args.name
else:
raise ConfigError(
"default machine must be present in machine "
"list."
)
return 0
class CmdMachineCreate(CmdBase):
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
self.repo.machine.create(self.args.name)
return 0
class CmdMachineStatus(CmdBase):
INSTANCE_FIELD = ["name", "instance", "status"]
SHOWN_FIELD = [
"cloud",
"instance_ip",
"instance_type",
"instance_hdd_size",
"instance_gpu",
]
def _add_row(
self,
name: str,
all_status: List[Dict],
td: TabularData,
):
if not all_status:
row = [name, None, "offline"]
td.append(row)
for i, status in enumerate(all_status, start=1):
row = [name, f"num_{i}", "running" if status else "offline"]
for field in self.SHOWN_FIELD:
value = str(status.get(field, ""))
row.append(value)
td.append(row)
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
td = TabularData(
self.INSTANCE_FIELD + self.SHOWN_FIELD, fill_value="-"
)
if self.args.name:
all_status = list(self.repo.machine.status(self.args.name))
self._add_row(self.args.name, all_status, td)
else:
name_set = set()
for level in self.repo.config.LEVELS:
conf = self.repo.config.read(level)["machine"]
name_set.update(conf.keys())
name_list = list(name_set)
for name in sorted(name_list):
all_status = list(self.repo.machine.status(name))
self._add_row(name, all_status, td)
td.dropna("cols", "all")
td.render()
return 0
class CmdMachineDestroy(CmdBase):
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
self.repo.machine.destroy(self.args.name)
return 0
class CmdMachineSsh(CmdBase):
def run(self):
if self.repo.machine is None:
raise MachineDisabledError
self.repo.machine.run_shell(self.args.name)
return 0
def add_parser(subparsers, parent_parser):
from dvc.command.config import parent_config_parser
machine_HELP = "Set up and manage cloud machines."
machine_parser = subparsers.add_parser(
"machine",
parents=[parent_parser],
description=append_doc_link(machine_HELP, "machine"),
# NOTE: suppress help during development to hide command
# help=machine_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_subparsers = machine_parser.add_subparsers(
dest="cmd",
help="Use `dvc machine CMD --help` for " "command-specific help.",
)
fix_subparsers(machine_subparsers)
    machine_ADD_HELP = "Add a new machine."
machine_add_parser = machine_subparsers.add_parser(
"add",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_ADD_HELP, "machine/add"),
help=machine_ADD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_add_parser.add_argument("name", help="Name of the machine")
machine_add_parser.add_argument(
"cloud",
help="Machine cloud. See full list of supported clouds at {}".format(
format_link(
"https://github.com/iterative/"
"terraform-provider-iterative#machine"
)
),
)
machine_add_parser.add_argument(
"-d",
"--default",
action="store_true",
default=False,
help="Set as default machine.",
)
machine_add_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Force overwriting existing configs",
)
machine_add_parser.set_defaults(func=CmdMachineAdd)
machine_DEFAULT_HELP = "Set/unset the default machine."
machine_default_parser = machine_subparsers.add_parser(
"default",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_DEFAULT_HELP, "machine/default"),
help=machine_DEFAULT_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_default_parser.add_argument(
"name", nargs="?", help="Name of the machine"
)
machine_default_parser.add_argument(
"-u",
"--unset",
action="store_true",
default=False,
help="Unset default machine.",
)
machine_default_parser.set_defaults(func=CmdMachineDefault)
machine_LIST_HELP = "List the configuration of one/all machines."
machine_list_parser = machine_subparsers.add_parser(
"list",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_LIST_HELP, "machine/list"),
help=machine_LIST_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_list_parser.add_argument(
"--show-origin",
default=False,
action="store_true",
help="Show the source file containing each config value.",
)
machine_list_parser.add_argument(
"name",
nargs="?",
type=str,
help="name of machine to specify",
)
machine_list_parser.set_defaults(func=CmdMachineList)
    machine_MODIFY_HELP = "Modify the configuration of a machine."
machine_modify_parser = machine_subparsers.add_parser(
"modify",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_MODIFY_HELP, "machine/modify"),
help=machine_MODIFY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_modify_parser.add_argument("name", help="Name of the machine")
machine_modify_parser.add_argument(
"option", help="Name of the option to modify."
)
machine_modify_parser.add_argument(
"value", nargs="?", help="(optional) Value of the option."
)
machine_modify_parser.add_argument(
"-u",
"--unset",
default=False,
action="store_true",
help="Unset option.",
)
machine_modify_parser.set_defaults(func=CmdMachineModify)
    machine_RENAME_HELP = "Rename a machine."
machine_rename_parser = machine_subparsers.add_parser(
"rename",
parents=[parent_config_parser, parent_parser],
        description=append_doc_link(machine_RENAME_HELP, "machine/rename"),
help=machine_RENAME_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_rename_parser.add_argument("name", help="Machine to be renamed")
machine_rename_parser.add_argument("new", help="New name of the machine")
machine_rename_parser.set_defaults(func=CmdMachineRename)
    machine_REMOVE_HELP = "Remove a machine."
machine_remove_parser = machine_subparsers.add_parser(
"remove",
parents=[parent_config_parser, parent_parser],
description=append_doc_link(machine_REMOVE_HELP, "machine/remove"),
help=machine_REMOVE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_remove_parser.add_argument(
"name", help="Name of the machine to remove."
)
machine_remove_parser.set_defaults(func=CmdMachineRemove)
machine_CREATE_HELP = "Create and start a machine instance."
machine_create_parser = machine_subparsers.add_parser(
"create",
parents=[parent_parser],
description=append_doc_link(machine_CREATE_HELP, "machine/create"),
help=machine_CREATE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_create_parser.add_argument(
"name", help="Name of the machine to create."
)
machine_create_parser.set_defaults(func=CmdMachineCreate)
machine_STATUS_HELP = (
"List the status of running instances for one/all machines."
)
machine_status_parser = machine_subparsers.add_parser(
"status",
parents=[parent_parser],
description=append_doc_link(machine_STATUS_HELP, "machine/status"),
help=machine_STATUS_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_status_parser.add_argument(
"name", nargs="?", help="(optional) Name of the machine."
)
machine_status_parser.set_defaults(func=CmdMachineStatus)
    machine_DESTROY_HELP = "Destroy a machine instance."
machine_destroy_parser = machine_subparsers.add_parser(
"destroy",
parents=[parent_parser],
description=append_doc_link(machine_DESTROY_HELP, "machine/destroy"),
help=machine_DESTROY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_destroy_parser.add_argument(
"name", help="Name of the machine instance to destroy."
)
machine_destroy_parser.set_defaults(func=CmdMachineDestroy)
machine_SSH_HELP = "Connect to a machine via SSH."
machine_ssh_parser = machine_subparsers.add_parser(
"ssh",
parents=[parent_parser],
description=append_doc_link(machine_SSH_HELP, "machine/ssh"),
help=machine_SSH_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
machine_ssh_parser.add_argument(
"name", help="Name of the machine instance to connect to."
)
machine_ssh_parser.set_defaults(func=CmdMachineSsh)
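# --- Illustrative wiring sketch (not part of the original module) ---
# Rough outline of how add_parser() above is meant to be used: dvc's CLI
# builds a top-level parser, registers each command module's subparser,
# and dispatches to the Cmd* class stored in `func`. The bare
# `parent_parser` here is a stand-in for dvc's real shared parser, and
# constructing a Cmd* class needs a repo/config context, so this is only
# a sketch.
def _example_dispatch(argv):
    parent_parser = argparse.ArgumentParser(add_help=False)
    parser = argparse.ArgumentParser(prog="dvc")
    subparsers = parser.add_subparsers(dest="cmd")
    add_parser(subparsers, parent_parser)
    # e.g. argv = ["machine", "list"] selects CmdMachineList
    args = parser.parse_args(argv)
    return args.func(args).run()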
|
dmpetrov/dataversioncontrol
|
dvc/command/machine.py
|
Python
|
apache-2.0
| 16,882
|
import os
from os import walk
templatePath = r'templates/serviceTemplate.txt'
writePath = r'/Source/Api/service-hmlFhirConverter/src/main/java/org/nmdp/hmlfhirconverter/service'
class ServiceGenerator:
def get_template(self):
with open(templatePath, 'r') as fileReader:
return fileReader.read()
def write_file(self, fileContents, fileName):
path = os.path.join(writePath, self.get_file_name(fileName))
with open(path, 'w') as fileWriter:
fileWriter.write(fileContents)
def get_file_name(self, className):
return className + 'Service.java'
def file_exists(self, className):
for (dirpath, dirnames, filenames) in walk(writePath):
return self.get_file_name(className) in filenames
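# --- Illustrative usage sketch (not part of the original module) ---
# Intended flow: read the template once, substitute the class name, and
# write the generated *Service.java file unless it already exists. The
# '{className}' placeholder is an assumption about serviceTemplate.txt,
# which is not shown here.
if __name__ == '__main__':
    generator = ServiceGenerator()
    class_name = 'Hml'
    if not generator.file_exists(class_name):
        contents = generator.get_template().replace('{className}', class_name)
        generator.write_file(contents, class_name)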
|
nmdp-bioinformatics/service-hml-fhir-converter
|
CodeGen/hmlFhirConverterCodeGenerator/codegen/service/ServiceGenerator.py
|
Python
|
apache-2.0
| 776
|
#coding:UTF-8
"""
磁盘监控模块
"""
from config import disk
from lib import core
import os, re, time
def init():
"对外接口"
sign=True
for t in disk.DISK_PATH:
warn,data=check(t)
if not warn:
            login_time=time.time()
            message="Disk monitoring warning: disk usage exceeded %s"%(disk.DISK_USED)+"%\nMonitoring result: "+data
            message=message.decode("UTF-8")
            print message
            core.sendEmail(message)
            print u"Alert email has been sent"
sign=False
return sign
def getIntervalTime():
"获取检测间隔时间"
return disk.DISK_DELAY
def check(path):
"检测是否超出预警"
r=os.popen("df -h "+path)
for line in r:
data=line.rstrip()
datas=re.split(r'\s+',data)
used=datas[4].encode("UTF-8").replace("%","")
return int(used) < disk.DISK_USED,data
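# --- Illustrative usage sketch (not part of the original module) ---
# One monitoring pass: init() returns False (and sends an alert e-mail via
# lib.core) when any path listed in config.disk.DISK_PATH exceeds
# disk.DISK_USED percent usage. Assumes those project-specific modules are
# importable.
if __name__ == '__main__':
    if init():
        print "disk usage is within the configured limit"
    else:
        print "disk usage warning was sent"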
|
yubang/smallMonitor
|
lib/disk.py
|
Python
|
apache-2.0
| 903
|
from a10sdk.common.A10BaseClass import A10BaseClass
class DfBitTransparency(A10BaseClass):
"""Class Description::
Add an empty IPv6 fragmentation header if IPv4 DF bit is zero (default:disabled).
Class df-bit-transparency supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param df_bit_value: {"optional": true, "enum": ["enable"], "type": "string", "description": "'enable': Add an empty IPv6 fragmentation header if IPv4 DF bit is zero; ", "format": "enum"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/nat64/fragmentation/df-bit-transparency`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "df-bit-transparency"
self.a10_url="/axapi/v3/cgnv6/nat64/fragmentation/df-bit-transparency"
self.DeviceProxy = ""
self.df_bit_value = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
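# --- Illustrative usage sketch (not part of the original module) ---
# Constructing the object only populates its attributes; actually pushing
# the change requires a DeviceProxy bound to a real AXAPI session, which is
# created elsewhere in the SDK and is not shown here.
if __name__ == '__main__':
    df_bit = DfBitTransparency(df_bit_value="enable")
    print df_bit.a10_url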
|
amwelch/a10sdk-python
|
a10sdk/core/cgnv6/cgnv6_nat64_fragmentation_df_bit_transparency.py
|
Python
|
apache-2.0
| 1,367
|
# Copyright (c) 2013 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from climate.api.v1.oshosts import service as service_api
from climate import tests
class RPCApiTestCase(tests.TestCase):
def setUp(self):
super(RPCApiTestCase, self).setUp()
self.s_api = service_api
self.fake_list = []
self.fake_computehost = {}
fake_get_computehosts = self.patch(self.s_api.API, "get_computehosts")
fake_get_computehosts.return_value = self.fake_list
self.patch(self.s_api.API, "create_computehost").return_value = True
fake_get_computehost = self.patch(self.s_api.API, "get_computehost")
fake_get_computehost.return_value = self.fake_computehost
self.patch(self.s_api.API, "update_computehost").return_value = True
self.patch(self.s_api.API, "delete_computehost").return_value = True
def test_get_computehost(self):
pass
def test_create_computehost(self):
pass
def test_update_computehost(self):
pass
def test_delete_computehost(self):
pass
|
paramite/blazar
|
climate/tests/api/v1/oshosts/test_service.py
|
Python
|
apache-2.0
| 1,584
|
import dpkt
import socket
import logging
l = logging.getLogger("simuvex.s_pcap")
class PCAP(object):
def __init__(self,path, ip_port_tup, init=True):
self.path = path
self.packet_num = 0
self.pos = 0
self.in_streams = []
self.out_streams = []
#self.in_buf = ''
self.ip = ip_port_tup[0]
self.port = ip_port_tup[1]
if init:
self.initialize(self.path)
def initialize(self,path):
#import ipdb;ipdb.set_trace()
        f = open(path, "rb")  # pcap files must be read in binary mode
pcap = dpkt.pcap.Reader(f)
for _,buf in pcap:
#data = dpkt.ethernet.Ethernet(buf).ip.data.data
ip = dpkt.ethernet.Ethernet(buf).ip
tcp = ip.data
myip = socket.inet_ntoa(ip.dst)
            # compare with ==/!=; identity checks on strings and ints are unreliable
            if myip == self.ip and tcp.dport == self.port and len(tcp.data) != 0:
                self.out_streams.append((len(tcp.data),tcp.data))
            elif len(tcp.data) != 0:
self.in_streams.append((len(tcp.data),tcp.data))
f.close()
def recv(self, length):
#import ipdb;ipdb.set_trace()
temp = 0
#import ipdb;ipdb.set_trace()
#pcap = self.pcap
initial_packet = self.packet_num
plength, pdata = self.in_streams[self.packet_num]
length = min(length, plength)
        if self.pos == 0:
if plength > length:
temp = length
else:
self.packet_num += 1
packet_data = pdata[self.pos:length]
self.pos += temp
else:
if (self.pos + length) >= plength:
rest = plength-self.pos
length = rest
self.packet_num += 1
packet_data = pdata[self.pos:plength]
        if self.packet_num != initial_packet:
self.pos = 0
return packet_data, length
def copy(self):
new_pcap = PCAP(self.path, (self.ip, self.port), init=False)
new_pcap.packet_num = self.packet_num
new_pcap.pos = self.pos
new_pcap.in_streams = self.in_streams
new_pcap.out_streams = self.out_streams
return new_pcap
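# --- Illustrative usage sketch (not part of the original module) ---
# Replays captured TCP payloads through the socket-like recv() interface.
# "service.pcap" and the (ip, port) pair are placeholders for a capture of
# the traffic being analyzed.
if __name__ == '__main__':
    pcap = PCAP("service.pcap", ("10.0.0.1", 8080))
    if pcap.in_streams:
        data, length = pcap.recv(512)
        l.info("read %d bytes from the capture", length)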
|
chubbymaggie/simuvex
|
simuvex/s_pcap.py
|
Python
|
bsd-2-clause
| 2,173
|
from syslog import syslog
module_name = "Syslog"
config = {
"prefix": "Default Prefix"
}
def handle_alert(message):
syslog("{} - {}".format(config["prefix"], message))
|
camerongray1515/Prophasis
|
application/prophasis_common/prophasis_common/alert_modules/syslog.py
|
Python
|
bsd-2-clause
| 179
|
#
# CORE
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
session.py: defines the Session class used by the core-daemon daemon program
that manages a CORE session.
'''
import os, sys, tempfile, shutil, shlex, atexit, gc, pwd
import threading, time, random
from core.api import coreapi
if os.uname()[0] == "Linux":
from core.netns import nodes
from core.netns.vnet import GreTapBridge
elif os.uname()[0] == "FreeBSD":
from core.bsd import nodes
from core.emane import emane
from core.misc.utils import check_call, mutedetach, readfileintodict, \
filemunge, filedemunge
from core.conf import ConfigurableManager, Configurable
from core.location import CoreLocation
from core.service import CoreServices
from core.broker import CoreBroker
from core.mobility import MobilityManager
from core.sdt import Sdt
from core.misc.ipaddr import MacAddr
from core.misc.event import EventLoop
from core.constants import *
from core.xen import xenconfig
class Session(object):
# sessions that get automatically shutdown when the process
# terminates normally
__sessions = set()
''' CORE session manager.
'''
def __init__(self, sessionid = None, cfg = {}, server = None,
persistent = False, mkdir = True):
if sessionid is None:
# try to keep this short since it's used to construct
# network interface names
pid = os.getpid()
sessionid = ((pid >> 16) ^
(pid & ((1 << 16) - 1)))
sessionid ^= ((id(self) >> 16) ^ (id(self) & ((1 << 16) - 1)))
sessionid &= 0xffff
self.sessionid = sessionid
self.sessiondir = os.path.join(tempfile.gettempdir(),
"pycore.%s" % self.sessionid)
if mkdir:
os.mkdir(self.sessiondir)
self.name = None
self.filename = None
self.thumbnail = None
self.user = None
self.node_count = None
self._time = time.time()
self.evq = EventLoop()
# dict of objects: all nodes and nets
self._objs = {}
self._objslock = threading.Lock()
# dict of configurable objects
self._confobjs = {}
self._confobjslock = threading.Lock()
self._handlers = set()
self._handlerslock = threading.Lock()
self._state = None
self._hooks = {}
self._state_hooks = {}
# dict of configuration items from /etc/core/core.conf config file
self.cfg = cfg
self.add_state_hook(coreapi.CORE_EVENT_RUNTIME_STATE,
self.runtime_state_hook)
self.setstate(state=coreapi.CORE_EVENT_DEFINITION_STATE,
info=False, sendevent=False)
self.server = server
if not persistent:
self.addsession(self)
self.master = False
self.broker = CoreBroker(session=self, verbose=True)
self.location = CoreLocation(self)
self.mobility = MobilityManager(self)
self.services = CoreServices(self)
self.emane = emane.Emane(self)
self.xen = xenconfig.XenConfigManager(self)
self.sdt = Sdt(self)
# future parameters set by the GUI may go here
self.options = SessionConfig(self)
self.metadata = SessionMetaData(self)
@classmethod
def addsession(cls, session):
cls.__sessions.add(session)
@classmethod
def delsession(cls, session):
try:
cls.__sessions.remove(session)
except KeyError:
pass
@classmethod
def atexit(cls):
while cls.__sessions:
s = cls.__sessions.pop()
print >> sys.stderr, "WARNING: automatically shutting down " \
"non-persistent session %s" % s.sessionid
s.shutdown()
def __del__(self):
# note: there is no guarantee this will ever run
self.shutdown()
def shutdown(self):
''' Shut down all emulation objects and remove the session directory.
'''
if hasattr(self, 'emane'):
self.emane.shutdown()
if hasattr(self, 'broker'):
self.broker.shutdown()
if hasattr(self, 'sdt'):
self.sdt.shutdown()
self.delobjs()
preserve = False
if hasattr(self.options, 'preservedir'):
if self.options.preservedir == '1':
preserve = True
if not preserve:
shutil.rmtree(self.sessiondir, ignore_errors = True)
if self.server:
self.server.delsession(self)
self.delsession(self)
def isconnected(self):
''' Returns true if this session has a request handler.
'''
with self._handlerslock:
if len(self._handlers) == 0:
return False
else:
return True
def connect(self, handler):
''' Set the request handler for this session, making it connected.
'''
# the master flag will only be set after a GUI has connected with the
# handler, e.g. not during normal startup
if handler.master is True:
self.master = True
with self._handlerslock:
self._handlers.add(handler)
def disconnect(self, handler):
''' Disconnect a request handler from this session. Shutdown this
session if there is no running emulation.
'''
with self._handlerslock:
try:
self._handlers.remove(handler)
except KeyError:
raise ValueError, \
"Handler %s not associated with this session" % handler
num_handlers = len(self._handlers)
if num_handlers == 0:
# shut down this session unless we are instantiating, running,
# or collecting final data
if self.getstate() < coreapi.CORE_EVENT_INSTANTIATION_STATE or \
self.getstate() > coreapi.CORE_EVENT_DATACOLLECT_STATE:
self.shutdown()
def broadcast(self, src, msg):
''' Send Node and Link CORE API messages to all handlers connected to this session.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
if isinstance(msg, coreapi.CoreNodeMessage) or \
isinstance(msg, coreapi.CoreLinkMessage):
try:
handler.sendall(msg.rawmsg)
except Exception, e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def broadcastraw(self, src, data):
''' Broadcast raw data to all handlers except src.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
try:
handler.sendall(data)
except Exception, e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def gethandler(self):
        ''' Get one of the connected handlers, preferably the master.
'''
with self._handlerslock:
if len(self._handlers) == 0:
return None
for handler in self._handlers:
if handler.master:
return handler
for handler in self._handlers:
return handler
def setstate(self, state, info = False, sendevent = False,
returnevent = False):
''' Set the session state. When info is true, log the state change
event using the session handler's info method. When sendevent is
true, generate a CORE API Event Message and send to the connected
entity.
'''
if state == self._state:
return []
self._time = time.time()
self._state = state
self.run_state_hooks(state)
replies = []
if self.isconnected() and info:
statename = coreapi.state_name(state)
with self._handlerslock:
for handler in self._handlers:
handler.info("SESSION %s STATE %d: %s at %s" % \
(self.sessionid, state, statename,
time.ctime()))
self.writestate(state)
self.runhook(state)
if self.isconnected() and sendevent:
tlvdata = ""
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
state)
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
# send Event Message to connected handlers (e.g. GUI)
try:
if returnevent:
replies.append(msg)
else:
self.broadcastraw(None, msg)
except Exception, e:
self.warn("Error sending Event Message: %s" % e)
# also inform slave servers
tmp = self.broker.handlerawmsg(msg)
return replies
def getstate(self):
''' Retrieve the current state of the session.
'''
return self._state
def writestate(self, state):
''' Write the current state to a state file in the session dir.
'''
try:
f = open(os.path.join(self.sessiondir, "state"), "w")
f.write("%d %s\n" % (state, coreapi.state_name(state)))
f.close()
except Exception, e:
self.warn("Error writing state file: %s" % e)
def runhook(self, state, hooks=None):
''' Run hook scripts upon changing states.
If hooks is not specified, run all hooks in the given state.
'''
if state not in self._hooks:
return
if hooks is None:
hooks = self._hooks[state]
for (filename, data) in hooks:
try:
f = open(os.path.join(self.sessiondir, filename), "w")
f.write(data)
f.close()
except Exception, e:
self.warn("Error writing hook '%s': %s" % (filename, e))
self.info("Running hook %s for state %s" % (filename, state))
try:
check_call(["/bin/sh", filename], cwd=self.sessiondir,
env=self.getenviron())
except Exception, e:
self.warn("Error running hook '%s' for state %s: %s" %
(filename, state, e))
def sethook(self, type, filename, srcname, data):
''' Store a hook from a received File Message.
'''
if srcname is not None:
raise NotImplementedError
(hookid, state) = type.split(':')[:2]
if not state.isdigit():
self.warn("Error setting hook having state '%s'" % state)
return
state = int(state)
hook = (filename, data)
if state not in self._hooks:
self._hooks[state] = [hook,]
else:
            self._hooks[state].append(hook)  # keep each (filename, data) hook as one entry
# immediately run a hook if it is in the current state
# (this allows hooks in the definition and configuration states)
if self.getstate() == state:
self.runhook(state, hooks = [hook,])
def delhooks(self):
''' Clear the hook scripts dict.
'''
self._hooks = {}
def run_state_hooks(self, state):
try:
hooks = self._state_hooks[state]
for hook in hooks:
hook(state)
except KeyError:
pass
def add_state_hook(self, state, hook):
try:
hooks = self._state_hooks[state]
assert hook not in hooks
hooks.append(hook)
except KeyError:
self._state_hooks[state] = [hook]
if self._state == state:
hook(state)
def del_state_hook(self, state, hook):
try:
hooks = self._state_hooks[state]
self._state_hooks[state] = filter(lambda x: x != hook, hooks)
except KeyError:
pass
def runtime_state_hook(self, state):
if state == coreapi.CORE_EVENT_RUNTIME_STATE:
self.emane.poststartup()
def getenviron(self, state=True):
''' Get an environment suitable for a subprocess.Popen call.
This is the current process environment with some session-specific
variables.
'''
env = os.environ.copy()
env['SESSION'] = "%s" % self.sessionid
env['SESSION_SHORT'] = "%s" % self.shortsessionid()
env['SESSION_DIR'] = "%s" % self.sessiondir
env['SESSION_NAME'] = "%s" % self.name
env['SESSION_FILENAME'] = "%s" % self.filename
env['SESSION_USER'] = "%s" % self.user
env['SESSION_NODE_COUNT'] = "%s" % self.node_count
if state:
env['SESSION_STATE'] = "%s" % self.getstate()
try:
readfileintodict(os.path.join(CORE_CONF_DIR, "environment"), env)
except IOError:
pass
if self.user:
try:
readfileintodict(os.path.join('/home', self.user, ".core",
"environment"), env)
except IOError:
pass
return env
def setthumbnail(self, thumbfile):
''' Set the thumbnail filename. Move files from /tmp to session dir.
'''
if not os.path.exists(thumbfile):
self.thumbnail = None
return
dstfile = os.path.join(self.sessiondir, os.path.basename(thumbfile))
shutil.move(thumbfile, dstfile)
#print "thumbnail: %s -> %s" % (thumbfile, dstfile)
self.thumbnail = dstfile
def setuser(self, user):
''' Set the username for this session. Update the permissions of the
session dir to allow the user write access.
'''
if user is not None:
try:
uid = pwd.getpwnam(user).pw_uid
gid = os.stat(self.sessiondir).st_gid
os.chown(self.sessiondir, uid, gid)
except Exception, e:
self.warn("Failed to set permission on %s: %s" % (self.sessiondir, e))
self.user = user
def objs(self):
''' Return iterator over the emulation object dictionary.
'''
return self._objs.itervalues()
def getobjid(self):
''' Return a unique, random object id.
'''
self._objslock.acquire()
while True:
id = random.randint(1, 0xFFFF)
if id not in self._objs:
break
self._objslock.release()
return id
def addobj(self, cls, *clsargs, **clskwds):
''' Add an emulation object.
'''
obj = cls(self, *clsargs, **clskwds)
self._objslock.acquire()
if obj.objid in self._objs:
self._objslock.release()
obj.shutdown()
raise KeyError, "non-unique object id %s for %s" % (obj.objid, obj)
self._objs[obj.objid] = obj
self._objslock.release()
return obj
def obj(self, objid):
''' Get an emulation object.
'''
if objid not in self._objs:
raise KeyError, "unknown object id %s" % (objid)
return self._objs[objid]
def objbyname(self, name):
''' Get an emulation object using its name attribute.
'''
with self._objslock:
for obj in self.objs():
if hasattr(obj, "name") and obj.name == name:
return obj
raise KeyError, "unknown object with name %s" % (name)
def delobj(self, objid):
''' Remove an emulation object.
'''
self._objslock.acquire()
try:
o = self._objs.pop(objid)
except KeyError:
o = None
self._objslock.release()
if o:
o.shutdown()
del o
gc.collect()
# print "gc count:", gc.get_count()
# for o in gc.get_objects():
# if isinstance(o, PyCoreObj):
# print "XXX XXX XXX PyCoreObj:", o
# for r in gc.get_referrers(o):
# print "XXX XXX XXX referrer:", gc.get_referrers(o)
def delobjs(self):
''' Clear the _objs dictionary, and call each obj.shutdown() routine.
'''
self._objslock.acquire()
while self._objs:
k, o = self._objs.popitem()
o.shutdown()
self._objslock.release()
def writeobjs(self):
''' Write objects to a 'nodes' file in the session dir.
The 'nodes' file lists:
number, name, api-type, class-type
'''
try:
f = open(os.path.join(self.sessiondir, "nodes"), "w")
with self._objslock:
for objid in sorted(self._objs.keys()):
o = self._objs[objid]
f.write("%s %s %s %s\n" % (objid, o.name, o.apitype, type(o)))
f.close()
except Exception, e:
self.warn("Error writing nodes file: %s" % e)
def addconfobj(self, objname, type, callback):
''' Objects can register configuration objects that are included in
the Register Message and may be configured via the Configure
Message. The callback is invoked when receiving a Configure Message.
'''
if type not in coreapi.reg_tlvs:
raise Exception, "invalid configuration object type"
self._confobjslock.acquire()
self._confobjs[objname] = (type, callback)
self._confobjslock.release()
def confobj(self, objname, session, msg):
''' Invoke the callback for an object upon receipt of a Configure
Message for that object. A no-op if the object doesn't exist.
'''
replies = []
self._confobjslock.acquire()
if objname == "all":
for objname in self._confobjs:
(type, callback) = self._confobjs[objname]
reply = callback(session, msg)
if reply is not None:
replies.append(reply)
self._confobjslock.release()
return replies
if objname in self._confobjs:
(type, callback) = self._confobjs[objname]
self._confobjslock.release()
reply = callback(session, msg)
if reply is not None:
replies.append(reply)
return replies
else:
self.info("session object doesn't own model '%s', ignoring" % \
objname)
self._confobjslock.release()
return replies
def confobjs_to_tlvs(self):
''' Turn the configuration objects into a list of Register Message TLVs.
'''
tlvdata = ""
self._confobjslock.acquire()
for objname in self._confobjs:
(type, callback) = self._confobjs[objname]
# type must be in coreapi.reg_tlvs
tlvdata += coreapi.CoreRegTlv.pack(type, objname)
self._confobjslock.release()
return tlvdata
def info(self, msg):
''' Utility method for writing output to stdout.
'''
print msg
sys.stdout.flush()
def warn(self, msg):
''' Utility method for writing output to stderr.
'''
print >> sys.stderr, msg
sys.stderr.flush()
def dumpsession(self):
''' Debug print this session.
'''
self.info("session id=%s name=%s state=%s connected=%s" % \
(self.sessionid, self.name, self._state, self.isconnected()))
num = len(self._objs)
self.info(" file=%s thumb=%s nc=%s/%s" % \
(self.filename, self.thumbnail, self.node_count, num))
def exception(self, level, source, objid, text):
''' Generate an Exception Message
'''
vals = (objid, str(self.sessionid), level, source, time.ctime(), text)
types = ("NODE", "SESSION", "LEVEL", "SOURCE", "DATE", "TEXT")
tlvdata = ""
for (t,v) in zip(types, vals):
if v is not None:
tlvdata += coreapi.CoreExceptionTlv.pack(
eval("coreapi.CORE_TLV_EXCP_%s" % t), v)
msg = coreapi.CoreExceptionMessage.pack(0, tlvdata)
self.warn("exception: %s (%s) %s" % (source, objid, text))
# send Exception Message to connected handlers (e.g. GUI)
self.broadcastraw(None, msg)
def getcfgitem(self, cfgname):
''' Return an entry from the configuration dictionary that comes from
command-line arguments and/or the core.conf config file.
'''
if cfgname not in self.cfg:
return None
else:
return self.cfg[cfgname]
def getcfgitembool(self, cfgname, defaultifnone = None):
''' Return a boolean entry from the configuration dictionary, may
return None if undefined.
'''
item = self.getcfgitem(cfgname)
if item is None:
return defaultifnone
return bool(item.lower() == "true")
def getcfgitemint(self, cfgname, defaultifnone = None):
''' Return an integer entry from the configuration dictionary, may
return None if undefined.
'''
item = self.getcfgitem(cfgname)
if item is None:
return defaultifnone
return int(item)
def instantiate(self, handler=None):
''' We have entered the instantiation state, invoke startup methods
of various managers and boot the nodes. Validate nodes and check
for transition to the runtime state.
'''
self.writeobjs()
# controlnet may be needed by some EMANE models
self.addremovectrlif(node=None, remove=False)
if self.emane.startup() == self.emane.NOT_READY:
return # instantiate() will be invoked again upon Emane.configure()
self.broker.startup()
self.mobility.startup()
# boot the services on each node
self.bootnodes(handler)
# allow time for processes to start
time.sleep(0.125)
self.validatenodes()
# assume either all nodes have booted already, or there are some
# nodes on slave servers that will be booted and those servers will
# send a node status response message
self.checkruntime()
def getnodecount(self):
''' Returns the number of CoreNodes and CoreNets, except for those
that are not considered in the GUI's node count.
'''
with self._objslock:
count = len(filter(lambda(x): \
not isinstance(x, (nodes.PtpNet, nodes.CtrlNet)),
self.objs()))
# on Linux, GreTapBridges are auto-created, not part of
# GUI's node count
if 'GreTapBridge' in globals():
count -= len(filter(lambda(x): \
isinstance(x, GreTapBridge) and not \
isinstance(x, nodes.TunnelNode),
self.objs()))
return count
def checkruntime(self):
''' Check if we have entered the runtime state, that all nodes have been
started and the emulation is running. Start the event loop once we
have entered runtime (time=0).
'''
# this is called from instantiate() after receiving an event message
# for the instantiation state, and from the broker when distributed
# nodes have been started
if self.node_count is None:
return
if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
return
session_node_count = int(self.node_count)
nc = self.getnodecount()
# count booted nodes not emulated on this server
# TODO: let slave server determine RUNTIME and wait for Event Message
        # broker.getbootcount() counts all CoreNodes from status response
# messages, plus any remote WLANs; remote EMANE, hub, switch, etc.
# are already counted in self._objs
nc += self.broker.getbootcount()
self.info("Checking for runtime with %d of %d session nodes" % \
(nc, session_node_count))
if nc < session_node_count:
return # do not have information on all nodes yet
# information on all nodes has been received and they have been started
# enter the runtime state
# TODO: more sophisticated checks to verify that all nodes and networks
# are running
state = coreapi.CORE_EVENT_RUNTIME_STATE
self.evq.run()
self.setstate(state, info=True, sendevent=True)
def datacollect(self):
''' Tear down a running session. Stop the event loop and any running
nodes, and perform clean-up.
'''
self.evq.stop()
with self._objslock:
for obj in self.objs():
if isinstance(obj, nodes.PyCoreNode):
self.services.stopnodeservices(obj)
self.emane.shutdown()
self.updatectrlifhosts(remove=True)
self.addremovectrlif(node=None, remove=True)
# self.checkshutdown() is currently invoked from node delete handler
def checkshutdown(self):
''' Check if we have entered the shutdown state, when no running nodes
and links remain.
'''
nc = self.getnodecount()
# TODO: this doesn't consider slave server node counts
# wait for slave servers to enter SHUTDOWN state, then master session
# can enter SHUTDOWN
replies = ()
if self.getcfgitembool('verbose', False):
self.info("Session %d shutdown: %d nodes remaining" % \
(self.sessionid, nc))
if nc == 0:
replies = self.setstate(state=coreapi.CORE_EVENT_SHUTDOWN_STATE,
info=True, sendevent=True, returnevent=True)
self.sdt.shutdown()
return replies
def setmaster(self, handler):
''' Look for the specified handler and set our master flag
appropriately. Returns True if we are connected to the given
handler.
'''
with self._handlerslock:
for h in self._handlers:
if h != handler:
continue
self.master = h.master
return True
return False
def shortsessionid(self):
''' Return a shorter version of the session ID, appropriate for
interface names, where length may be limited.
'''
ssid = (self.sessionid >> 8) ^ (self.sessionid & ((1 << 8) - 1))
return "%x" % ssid
def sendnodeemuid(self, handler, nodenum):
''' Send back node messages to the GUI for node messages that had
the status request flag.
'''
if handler is None:
return
if nodenum in handler.nodestatusreq:
tlvdata = ""
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER,
nodenum)
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID,
nodenum)
reply = coreapi.CoreNodeMessage.pack(coreapi.CORE_API_ADD_FLAG \
| coreapi.CORE_API_LOC_FLAG,
tlvdata)
try:
handler.request.sendall(reply)
except Exception, e:
self.warn("sendall() for node: %d error: %s" % (nodenum, e))
del handler.nodestatusreq[nodenum]
def bootnodes(self, handler):
''' Invoke the boot() procedure for all nodes and send back node
messages to the GUI for node messages that had the status
request flag.
'''
with self._objslock:
for n in self.objs():
if isinstance(n, nodes.PyCoreNode) and \
not isinstance(n, nodes.RJ45Node):
# add a control interface if configured
self.addremovectrlif(node=n, remove=False)
n.boot()
self.sendnodeemuid(handler, n.objid)
self.updatectrlifhosts()
def validatenodes(self):
with self._objslock:
for n in self.objs():
# TODO: this can be extended to validate everything
# such as vnoded process, bridges, etc.
if not isinstance(n, nodes.PyCoreNode):
continue
if isinstance(n, nodes.RJ45Node):
continue
n.validate()
def addremovectrlnet(self, remove=False, conf_reqd=True):
''' Create a control network bridge as necessary.
When the remove flag is True, remove the bridge that connects control
interfaces. The conf_reqd flag, when False, causes a control network
bridge to be added even if one has not been configured.
'''
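        # Illustrative 'controlnet' values (hypothetical prefixes): either a
        # single prefix such as "172.16.0.0/24", or a space-separated list of
        # server:prefix pairs such as "core1:172.16.1.0/24 core2:172.16.2.0/24"
        # for distributed sessions; the parsing below gives the master server
        # the first prefix.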
prefix = self.cfg.get('controlnet')
prefix = getattr(self.options, 'controlnet', prefix)
if not prefix:
if conf_reqd:
return None # no controlnet needed
else:
prefix = nodes.CtrlNet.DEFAULT_PREFIX
# return any existing controlnet bridge
id = "ctrlnet"
try:
ctrlnet = self.obj(id)
if remove:
self.delobj(ctrlnet.objid)
return None
return ctrlnet
except KeyError:
if remove:
return None
# build a new controlnet bridge
updown_script = None
try:
if self.cfg['controlnet_updown_script']:
updown_script = self.cfg['controlnet_updown_script']
except KeyError:
pass
# Check if session option set, overwrite if so
if hasattr(self.options, 'controlnet_updown_script'):
new_uds = self.options.controlnet_updown_script
if new_uds:
updown_script = new_uds
prefixes = prefix.split()
if len(prefixes) > 1:
assign_address = True
if self.master:
try:
prefix = prefixes[0].split(':', 1)[1]
except IndexError:
prefix = prefixes[0] # possibly only one server
else:
# slave servers have their name and localhost in the serverlist
servers = self.broker.getserverlist()
servers.remove('localhost')
prefix = None
for server_prefix in prefixes:
try:
server, p = server_prefix.split(':')
except ValueError:
server = ""
p = None
if server == servers[0]:
prefix = p
break
if not prefix:
msg = "Control network prefix not found for server '%s'" % \
servers[0]
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
"Session.addremovectrlnet()", None, msg)
assign_address = False
try:
prefix = prefixes[0].split(':', 1)[1]
except IndexError:
prefix = prefixes[0]
else:
# with one prefix, only master gets a ctrlnet address
assign_address = self.master
ctrlnet = self.addobj(cls=nodes.CtrlNet, objid=id, prefix=prefix,
assign_address=assign_address,
updown_script=updown_script)
# tunnels between controlnets will be built with Broker.addnettunnels()
self.broker.addnet(id)
for server in self.broker.getserverlist():
self.broker.addnodemap(server, id)
return ctrlnet
def addremovectrlif(self, node, remove=False, conf_reqd=True):
''' Add a control interface to a node when a 'controlnet' prefix is
listed in the config file or session options. Uses
addremovectrlnet() to build or remove the control bridge.
If conf_reqd is False, the control network may be built even
when the user has not configured one (e.g. for EMANE.)
'''
ctrlnet = self.addremovectrlnet(remove, conf_reqd)
if ctrlnet is None:
return
if node is None:
return
if node.netif(ctrlnet.CTRLIF_IDX_BASE):
return # ctrl0 already exists
ctrlip = node.objid
try:
addrlist = ["%s/%s" % (ctrlnet.prefix.addr(ctrlip),
ctrlnet.prefix.prefixlen)]
except ValueError:
msg = "Control interface not added to node %s. " % node.objid
msg += "Invalid control network prefix (%s). " % ctrlnet.prefix
msg += "A longer prefix length may be required for this many nodes."
node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
"Session.addremovectrlif()", msg)
return
ifi = node.newnetif(net = ctrlnet, ifindex = ctrlnet.CTRLIF_IDX_BASE,
ifname = "ctrl0", hwaddr = MacAddr.random(),
addrlist = addrlist)
node.netif(ifi).control = True
def updatectrlifhosts(self, remove=False):
''' Add the IP addresses of control interfaces to the /etc/hosts file.
'''
if not self.getcfgitembool('update_etc_hosts', False):
return
id = "ctrlnet"
try:
ctrlnet = self.obj(id)
except KeyError:
return
header = "CORE session %s host entries" % self.sessionid
if remove:
if self.getcfgitembool('verbose', False):
self.info("Removing /etc/hosts file entries.")
filedemunge('/etc/hosts', header)
return
entries = []
for ifc in ctrlnet.netifs():
name = ifc.node.name
for addr in ifc.addrlist:
entries.append("%s %s" % (addr.split('/')[0], ifc.node.name))
if self.getcfgitembool('verbose', False):
self.info("Adding %d /etc/hosts file entries." % len(entries))
filemunge('/etc/hosts', header, '\n'.join(entries) + '\n')
def runtime(self):
''' Return the current time we have been in the runtime state, or zero
if not in runtime.
'''
if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
return time.time() - self._time
else:
return 0.0
def addevent(self, etime, node=None, name=None, data=None):
''' Add an event to the event queue, with a start time relative to the
start of the runtime state.
'''
etime = float(etime)
runtime = self.runtime()
if runtime > 0.0:
            if etime <= runtime:
                self.warn("Could not schedule past event for time %s " \
                          "(run time is now %s)" % (etime, runtime))
return
etime = etime - runtime
func = self.runevent
self.evq.add_event(etime, func, node=node, name=name, data=data)
if name is None:
name = ""
self.info("scheduled event %s at time %s data=%s" % \
(name, etime + runtime, data))
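        # Worked example (illustrative): if the session has been running for
        # 10 seconds and addevent() is called with etime=25, the event is
        # queued to fire 15 seconds from now, i.e. 25 seconds into the run.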
def runevent(self, node=None, name=None, data=None):
''' Run a scheduled event, executing commands in the data string.
'''
now = self.runtime()
if name is None:
name = ""
self.info("running event %s at time %s cmd=%s" % (name, now, data))
if node is None:
mutedetach(shlex.split(data))
else:
n = self.obj(node)
n.cmd(shlex.split(data), wait=False)
def sendobjs(self):
''' Return API messages that describe the current session.
'''
replies = []
nn = 0
# send node messages for node and network objects
with self._objslock:
for obj in self.objs():
msg = obj.tonodemsg(flags = coreapi.CORE_API_ADD_FLAG)
if msg is not None:
replies.append(msg)
nn += 1
nl = 0
# send link messages from net objects
with self._objslock:
for obj in self.objs():
linkmsgs = obj.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG)
for msg in linkmsgs:
replies.append(msg)
nl += 1
# send model info
configs = self.mobility.getallconfigs()
configs += self.emane.getallconfigs()
for (nodenum, cls, values) in configs:
#cls = self.mobility._modelclsmap[conftype]
msg = cls.toconfmsg(flags=0, nodenum=nodenum,
typeflags=coreapi.CONF_TYPE_FLAGS_UPDATE,
values=values)
replies.append(msg)
# service customizations
svc_configs = self.services.getallconfigs()
for (nodenum, svc) in svc_configs:
opaque = "service:%s" % svc._name
tlvdata = ""
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
nodenum)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
opaque)
tmp = coreapi.CoreConfMessage(flags=0, hdr="", data=tlvdata)
replies.append(self.services.configure_request(tmp))
for (filename, data) in self.services.getallfiles(svc):
flags = coreapi.CORE_API_ADD_FLAG
tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE,
nodenum)
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME,
str(filename))
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE,
opaque)
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA,
str(data))
replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata))
# TODO: send location info
# replies.append(self.location.toconfmsg())
# send hook scripts
for state in sorted(self._hooks.keys()):
for (filename, data) in self._hooks[state]:
flags = coreapi.CORE_API_ADD_FLAG
tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME,
str(filename))
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE,
"hook:%s" % state)
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA,
str(data))
replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata))
# send meta data
tmp = coreapi.CoreConfMessage(flags=0, hdr="", data="")
opts = self.options.configure_request(tmp,
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE)
if opts:
replies.append(opts)
meta = self.metadata.configure_request(tmp,
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE)
if meta:
replies.append(meta)
self.info("informing GUI about %d nodes and %d links" % (nn, nl))
return replies
class SessionConfig(ConfigurableManager, Configurable):
_name = 'session'
_type = coreapi.CORE_TLV_REG_UTILITY
_confmatrix = [
("controlnet", coreapi.CONF_DATA_TYPE_STRING, '', '',
'Control network'),
("controlnet_updown_script", coreapi.CONF_DATA_TYPE_STRING, '', '',
'Control network script'),
("enablerj45", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off',
'Enable RJ45s'),
("preservedir", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off',
'Preserve session dir'),
("enablesdt", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off',
'Enable SDT3D output'),
("sdturl", coreapi.CONF_DATA_TYPE_STRING, Sdt.DEFAULT_SDT_URL, '',
'SDT3D URL'),
]
_confgroups = "Options:1-%d" % len(_confmatrix)
def __init__(self, session):
ConfigurableManager.__init__(self, session)
session.broker.handlers += (self.handledistributed, )
self.reset()
def reset(self):
defaults = self.getdefaultvalues()
for k in self.getnames():
# value may come from config file
v = self.session.getcfgitem(k)
if v is None:
v = self.valueof(k, defaults)
v = self.offontobool(v)
setattr(self, k, v)
def configure_values(self, msg, values):
return self.configure_values_keyvalues(msg, values, self,
self.getnames())
def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE):
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
values = []
for k in self.getnames():
v = getattr(self, k)
if v is None:
v = ""
values.append("%s" % v)
return self.toconfmsg(0, nodenum, typeflags, values)
def handledistributed(self, msg):
''' Handle the session options config message as it has reached the
broker. Options requiring modification for distributed operation should
be handled here.
'''
if not self.session.master:
return
if msg.msgtype != coreapi.CORE_API_CONF_MSG or \
msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) != "session":
return
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
if values_str is None:
return
values = values_str.split('|')
if not self.haskeyvalues(values):
return
for v in values:
key, value = v.split('=', 1)
if key == "controlnet":
self.handledistributedcontrolnet(msg, values, values.index(v))
def handledistributedcontrolnet(self, msg, values, idx):
''' Modify Config Message if multiple control network prefixes are
defined. Map server names to prefixes and repack the message before
it is forwarded to slave servers.
'''
kv = values[idx]
key, value = kv.split('=', 1)
controlnets = value.split()
if len(controlnets) < 2:
return # multiple controlnet prefixes do not exist
servers = self.session.broker.getserverlist()
if len(servers) < 2:
return # not distributed
servers.remove("localhost")
servers.insert(0, "localhost") # master always gets first prefix
# create list of "server1:ctrlnet1 server2:ctrlnet2 ..."
controlnets = map(lambda(x): "%s:%s" % (x[0],x[1]),
zip(servers, controlnets))
values[idx] = "controlnet=%s" % (' '.join(controlnets))
values_str = '|'.join(values)
msg.tlvdata[coreapi.CORE_TLV_CONF_VALUES] = values_str
msg.repack()
class SessionMetaData(ConfigurableManager):
''' Metadata is simply stored in a configs[] dict. Key=value pairs are
passed in from configure messages destined to the "metadata" object.
The data is not otherwise interpreted or processed.
'''
_name = "metadata"
_type = coreapi.CORE_TLV_REG_UTILITY
def configure_values(self, msg, values):
if values is None:
return None
kvs = values.split('|')
for kv in kvs:
try:
(key, value) = kv.split('=', 1)
except ValueError:
raise ValueError, "invalid key in metdata: %s" % kv
self.additem(key, value)
return None
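        # Illustrative values string (hypothetical keys): "author=alice|rev=3"
        # results in additem('author', 'alice') and additem('rev', '3').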
def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE):
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
values_str = "|".join(map(lambda(k,v): "%s=%s" % (k,v), self.items()))
return self.toconfmsg(0, nodenum, typeflags, values_str)
def toconfmsg(self, flags, nodenum, typeflags, values_str):
tlvdata = ""
if nodenum is not None:
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
nodenum)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
self._name)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
typeflags)
datatypes = tuple( map(lambda(k,v): coreapi.CONF_DATA_TYPE_STRING,
self.items()) )
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
datatypes)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
values_str)
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
return msg
def additem(self, key, value):
self.configs[key] = value
def items(self):
return self.configs.iteritems()
atexit.register(Session.atexit)
|
D3f0/coreemu
|
daemon/core/session.py
|
Python
|
bsd-2-clause
| 46,224
|
"""
sphinxcontrib.httpdomain
~~~~~~~~~~~~~~~~~~~~~~~~
The HTTP domain for documenting RESTful HTTP APIs.
:copyright: Copyright 2011 by Hong Minhee
:license: BSD, see LICENSE for details.
"""
import re
from docutils import nodes
from docutils.parsers.rst.roles import set_classes
from pygments.lexer import RegexLexer, bygroups
from pygments.lexers import get_lexer_by_name
from pygments.token import Literal, Text, Operator, Keyword, Name, Number
from pygments.util import ClassNotFound
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.domains import Domain, ObjType, Index
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import GroupedField, TypedField
class DocRef(object):
"""Represents a link to an RFC which defines an HTTP method."""
def __init__(self, base_url, anchor, section):
"""Stores the specified attributes which represent a URL which links to
an RFC which defines an HTTP method.
"""
self.base_url = base_url
self.anchor = anchor
self.section = section
def __repr__(self):
"""Returns the URL which this object represents, which points to the
location of the RFC which defines some HTTP method.
"""
return '{}#{}{}'.format(self.base_url, self.anchor, self.section)
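# Illustrative example (not part of the original source): using the constants
# defined below, repr(DocRef(RFC2616, RFC2616ANCHOR, 9.3)) evaluates to
# 'http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3'.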
#: The URL of the HTTP/1.1 RFC which defines the HTTP methods OPTIONS, GET,
#: HEAD, POST, PUT, DELETE, TRACE, and CONNECT.
RFC2616 = 'http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html'
#: The name to use for section anchors in RFC2616.
RFC2616ANCHOR = 'sec'
#: The URL of the RFC which defines the HTTP PATCH method.
RFC5789 = 'http://tools.ietf.org/html/rfc5789'
#: The name to use for section anchors in RFC5789.
RFC5789ANCHOR = 'section-'
#: Mapping from lowercase HTTP method name to :class:`DocRef` object which
#: maintains the URL which points to the section of the RFC which defines that
#: HTTP method.
DOCREFS = {
'patch': DocRef(RFC5789, RFC5789ANCHOR, 2),
'options': DocRef(RFC2616, RFC2616ANCHOR, 9.2),
'get': DocRef(RFC2616, RFC2616ANCHOR, 9.3),
'head': DocRef(RFC2616, RFC2616ANCHOR, 9.4),
'post': DocRef(RFC2616, RFC2616ANCHOR, 9.5),
'put': DocRef(RFC2616, RFC2616ANCHOR, 9.6),
'delete': DocRef(RFC2616, RFC2616ANCHOR, 9.7),
'trace': DocRef(RFC2616, RFC2616ANCHOR, 9.8),
'connect': DocRef(RFC2616, RFC2616ANCHOR, 9.9)
}
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: "I'm a teapot", # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
http_sig_param_re = re.compile(r'\((?:(?P<type>[^:)]+):)?(?P<name>[\w_]+)\)',
re.VERBOSE)
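# Illustrative match (hypothetical signature): in a path such as
# '/users/(int:user_id)' the pattern above captures type='int' and
# name='user_id'; the type part is optional, so '(user_id)' also matches.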
def http_resource_anchor(method, path):
path = re.sub(r'[<>:/]', '-', path)
return method.lower() + '-' + path
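# Illustrative anchor (hypothetical route): http_resource_anchor('get',
# '/users/(int:user_id)') returns 'get--users-(int-user_id)', because '<',
# '>', ':' and '/' in the path are replaced with '-'.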
class HTTPResource(ObjectDescription):
doc_field_types = [
TypedField('parameter', label='Parameters',
names=('param', 'parameter', 'arg', 'argument'),
typerolename='obj', typenames=('paramtype', 'type')),
TypedField('jsonparameter', label='Json Parameters',
names=('jsonparameter', 'jsonparam', 'json'),
typerolename='obj', typenames=('jsonparamtype', 'jsontype')),
TypedField('queryparameter', label='Query Parameters',
names=('queryparameter', 'queryparam', 'qparam', 'query'),
typerolename='obj', typenames=('queryparamtype', 'querytype', 'qtype')),
GroupedField('formparameter', label='Form Parameters',
names=('formparameter', 'formparam', 'fparam', 'form')),
GroupedField('requestheader', label='Request Headers',
rolename='mailheader',
names=('reqheader', 'requestheader')),
GroupedField('responseheader', label='Response Headers',
rolename='mailheader',
names=('resheader', 'responseheader')),
GroupedField('statuscode', label='Status Codes',
rolename='statuscode',
names=('statuscode', 'status', 'code'))
]
method = NotImplemented
def handle_signature(self, sig, signode):
method = self.method.upper() + ' '
signode += addnodes.desc_name(method, method)
offset = 0
for match in http_sig_param_re.finditer(sig):
path = sig[offset:match.start()]
signode += addnodes.desc_name(path, path)
params = addnodes.desc_parameterlist()
typ = match.group('type')
if typ:
typ = typ + ': '
params += addnodes.desc_annotation(typ, typ)
name = match.group('name')
params += addnodes.desc_parameter(name, name)
signode += params
offset = match.end()
if offset < len(sig):
path = sig[offset:len(sig)]
signode += addnodes.desc_name(path, path)
fullname = self.method.upper() + ' ' + path
signode['method'] = self.method
signode['path'] = sig
signode['fullname'] = fullname
return (fullname, self.method, sig)
def needs_arglist(self):
return False
def add_target_and_index(self, name_cls, sig, signode):
signode['ids'].append(http_resource_anchor(*name_cls[1:]))
self.env.domaindata['http'][self.method][sig] = (self.env.docname, '')
def get_index_text(self, modname, name):
return ''
class HTTPOptions(HTTPResource):
method = 'options'
class HTTPHead(HTTPResource):
method = 'head'
class HTTPPatch(HTTPResource):
method = 'patch'
class HTTPPost(HTTPResource):
method = 'post'
class HTTPGet(HTTPResource):
method = 'get'
class HTTPPut(HTTPResource):
method = 'put'
class HTTPDelete(HTTPResource):
method = 'delete'
class HTTPTrace(HTTPResource):
method = 'trace'
def http_statuscode_role(name, rawtext, text, lineno, inliner,
options={}, content=[]):
if text.isdigit():
code = int(text)
try:
status = HTTP_STATUS_CODES[code]
except KeyError:
            msg = inliner.reporter.error('%d is not a valid HTTP status code'
% code, lineno=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
else:
try:
code, status = re.split(r'\s', text.strip(), 1)
code = int(code)
except ValueError:
msg = inliner.reporter.error(
'HTTP status code must be an integer (e.g. `200`) or '
'start with an integer (e.g. `200 OK`); %r is invalid' %
text,
line=lineno
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
nodes.reference(rawtext)
if code == 226:
url = 'http://www.ietf.org/rfc/rfc3229.txt'
    elif code == 418:
        url = 'http://www.ietf.org/rfc/rfc2324.txt'
    elif code == 449:
url = 'http://msdn.microsoft.com/en-us/library' \
'/dd891478(v=prot.10).aspx'
elif code in HTTP_STATUS_CODES:
url = 'http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html' \
'#sec10.' + ('%d.%d' % (code // 100, 1 + code % 100))
else:
url = ''
set_classes(options)
node = nodes.reference(rawtext, '%d %s' % (code, status),
refuri=url, **options)
return [node], []
def http_method_role(name, rawtext, text, lineno, inliner,
options={}, content=[]):
method = str(text).lower()
if method not in DOCREFS:
        msg = inliner.reporter.error('%s is not a valid HTTP method' % method,
lineno=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
url = str(DOCREFS[method])
node = nodes.reference(rawtext, method.upper(), refuri=url, **options)
return [node], []
class HTTPXRefRole(XRefRole):
def __init__(self, method, **kwargs):
XRefRole.__init__(self, **kwargs)
self.method = method
def process_link(self, env, refnode, has_explicit_title, title, target):
if not target.startswith('/'):
pass
if not has_explicit_title:
title = self.method.upper() + ' ' + title
return title, target
class HTTPIndex(Index):
name = 'routingtable'
localname = 'HTTP Routing Table'
shortname = 'routing table'
def __init__(self, *args, **kwargs):
super(HTTPIndex, self).__init__(*args, **kwargs)
self.ignore = [[l for l in x.split('/') if l]
for x in self.domain.env.config['http_index_ignore_prefixes']]
self.ignore.sort(key=lambda x: -len(x))
def grouping_prefix(self, path):
letters = [x for x in path.split('/') if x]
for prefix in self.ignore:
if letters[:len(prefix)] == prefix:
return '/' + '/'.join(letters[:len(prefix) + 1])
return '/%s' % (letters[0] if letters else '',)
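    # Illustrative grouping (hypothetical config): with
    # http_index_ignore_prefixes = ['/api'], the path '/api/users/list' is
    # grouped under '/api/users', while '/users/list' falls back to '/users'.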
def generate(self, docnames=None):
content = {}
items = ((method, path, info)
for method, routes in self.domain.routes.items()
for path, info in routes.items())
items = sorted(items, key=lambda item: item[1])
for method, path, info in items:
entries = content.setdefault(self.grouping_prefix(path), [])
entries.append([
method.upper() + ' ' + path, 0, info[0],
http_resource_anchor(method, path), '', '', info[1]
])
content = sorted(content.items(), key=lambda k: k[0])
return (content, True)
class HTTPDomain(Domain):
"""HTTP domain."""
name = 'http'
label = 'HTTP'
object_types = {
'options': ObjType('options', 'options', 'obj'),
'head': ObjType('head', 'head', 'obj'),
'post': ObjType('post', 'post', 'obj'),
'get': ObjType('get', 'get', 'obj'),
'put': ObjType('put', 'put', 'obj'),
'patch': ObjType('patch', 'patch', 'obj'),
'delete': ObjType('delete', 'delete', 'obj'),
'trace': ObjType('trace', 'trace', 'obj')
}
directives = {
'options': HTTPOptions,
'head': HTTPHead,
'post': HTTPPost,
'get': HTTPGet,
'put': HTTPPut,
'patch': HTTPPatch,
'delete': HTTPDelete,
'trace': HTTPTrace
}
roles = {
'options': HTTPXRefRole('options'),
'head': HTTPXRefRole('head'),
'post': HTTPXRefRole('post'),
'get': HTTPXRefRole('get'),
'put': HTTPXRefRole('put'),
'patch': HTTPXRefRole('patch'),
'delete': HTTPXRefRole('delete'),
'trace': HTTPXRefRole('trace'),
'statuscode': http_statuscode_role,
'method': http_method_role
}
initial_data = {
'options': {}, # path: (docname, synopsis)
'head': {},
'post': {},
'get': {},
'put': {},
'patch': {},
'delete': {},
'trace': {}
}
indices = [HTTPIndex]
@property
def routes(self):
return dict((key, self.data[key]) for key in self.object_types)
def clear_doc(self, docname):
for typ, routes in self.routes.items():
for path, info in routes.items():
if info[0] == docname:
del routes[path]
def resolve_xref(self, env, fromdocname, builder, typ, target,
node, contnode):
try:
info = self.data[str(typ)][target]
except KeyError:
return
else:
anchor = http_resource_anchor(typ, target)
title = typ.upper() + ' ' + target
return make_refnode(builder, fromdocname, info[0], anchor,
contnode, title)
def get_objects(self):
for method, routes in self.routes.items():
for path, info in routes.items():
anchor = http_resource_anchor(method, path)
yield (path, path, method, info[0], anchor, 1)
class HTTPLexer(RegexLexer):
"""Lexer for HTTP sessions."""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
tokens = {
'root': [
(r'(GET|POST|PUT|PATCH|DELETE|HEAD|OPTIONS|TRACE)( +)([^ ]+)( +)'
r'(HTTPS?)(/)(1\.[01])(\r?\n|$)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTPS?)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
def setup(app):
app.add_domain(HTTPDomain)
try:
get_lexer_by_name('http')
except ClassNotFound:
app.add_lexer('http', HTTPLexer())
app.add_config_value('http_index_ignore_prefixes', [], None)
|
cutoffthetop/httpdomain
|
sphinxcontrib/httpdomain.py
|
Python
|
bsd-2-clause
| 16,298
|
from lino.api import dd
class Tag(dd.Model):
name = dd.CharField(max_length=100)
def __str__(self):
return self.name
@dd.receiver(dd.auto_create)
def my_auto_create_handler(sender, **kw):
print("My handler was called with {}".format(sender))
|
lino-framework/book
|
lino_book/projects/auto_create/models.py
|
Python
|
bsd-2-clause
| 267
|
from pathutils import full_path
CONFIG = {
"entityid" : "urn:mace:example.com:saml:roland:sp",
"name" : "urn:mace:example.com:saml:roland:sp",
"description": "My own SP",
"service": {
"sp": {
"endpoints":{
"assertion_consumer_service": ["http://lingon.catalogix.se:8087/"],
},
"required_attributes": ["surName", "givenName", "mail"],
"optional_attributes": ["title"],
"idp": ["urn:mace:example.com:saml:roland:idp"],
}
},
"debug" : 1,
"key_file" : full_path("test.key"),
"cert_file" : full_path("test.pem"),
"xmlsec_binary" : None,
"metadata": {
"local": [full_path("idp_2.xml")],
},
"virtual_organization" : {
"urn:mace:example.com:it:tek":{
"nameid_format" : "urn:oid:1.3.6.1.4.1.1466.115.121.1.15-NameID",
"common_identifier": "umuselin",
}
},
"subject_data": full_path("subject_data.db"),
"accepted_time_diff": 60,
"attribute_map_dir" : full_path("attributemaps"),
"organization": {
"name": ("AB Exempel", "se"),
"display_name": ("AB Exempel", "se"),
"url": "http://www.example.org",
},
"contact_person": [{
"given_name": "Roland",
"sur_name": "Hedberg",
"telephone_number": "+46 70 100 0000",
"email_address": ["tech@eample.com", "tech@example.org"],
"contact_type": "technical"
},
],
"secret": "0123456789",
"only_use_keys_in_metadata": True
}
|
arbn/pysaml2
|
tests/sp_2_conf.py
|
Python
|
bsd-2-clause
| 1,571
|
# Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
from multiprocessing import Lock
class IndexedTimeCache():
'''
    @param ttl: Maximum time to live for inserted item (first one will be applied)
'''
lock = Lock()
def __init__(self, ttl=30):
self.cache = dict()
self.ttl = ttl
def insert(self, index, data, ignore_fields=[]):
IndexedTimeCache.lock.acquire()
if index in self.cache: # UPDATE + AGGREGATE
self.cache[index]['data'] = self.__aggregate(self.cache[index]['data'], data, ignore_fields)
else: # NEW
self.cache[index] = {
'timestamp': int(time.time()), # Insert Time
'data': data
}
IndexedTimeCache.lock.release()
def size(self):
return len(self.cache)
def getItemsOutOfTTL(self):
IndexedTimeCache.lock.acquire()
cache_outofdate = dict()
cache_new = dict()
for k,v in self.cache.items():
if v['timestamp'] < (time.time() - self.ttl):
cache_outofdate[k] = v
else:
cache_new[k] = v
self.cache = cache_new # Update Cache
IndexedTimeCache.lock.release()
#print(len(cache_outofdate), len(cache_new))
#print(cache_outofdate)
#print(cache_new)
return [item['data'] for item in cache_outofdate.values()]
# cache_outofdate: dict_values([{'data': {'b': 1, 'a': 2, 'c': 4}, 'timestamp': 1403523219}, {...} ])
# Return: [{'c': 2, 'b': 23, 'a': 25}, {'c': 2, 'b': 32, 'a': 29}, ...
def __aggregate(self, old, new, ignore_fields):
aggregated = old
for key, value in new.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
if key in aggregated and (key not in ignore_fields or sub_key not in ignore_fields):
if sub_key in aggregated[key]:
aggregated[key][sub_key] += sub_value
else:
print("ERROR: Stats-Aggregation. Fields not found")
#aggregated[key][sub_key] = dict()
#aggregated[key][sub_key] = sub_value
else:
aggregated[key] = dict() #copy?
print("ERROR: Stats-Aggregation. Fields not found")
elif key not in ignore_fields:
aggregated[key] += new[key]
return aggregated
'''
import random
c = IndexedTimeCache(0)
for i in range(0,50):
c.insert((int(time.time() - random.randint(1, 5))), { 'a': random.randint(1, 5), 'b': random.randint(1, 5), 'c': random.randint(1, 5) }, ['c'])
print(c.size())
print("====", c.getItemsOutOfTTL())
print(c.size())
'''
'''
c = IndexedTimeCache(0)
c.insert('123456789Hamburg', {
"@timestamp": 123456789,
"networkLocation": "Hamburg",
"flow_request": {
"packetDeltaCountPerSec": 30,
"octetDeltaCountPerSec": 30,
"flowDurationMilliseconds": 300
}
})
c.insert('123456789Hamburg', {
"@timestamp": 123456789,
"networkLocation": "Hamburg",
"flow_request": {
"packetDeltaCountPerSec": 60,
"octetDeltaCountPerSec": 60,
"flowDurationMilliseconds": 600
}
})
c.insert('123456789Hamburg', {
"@timestamp": 123456789,
"networkLocation": "Hamburg",
"flow_request": {
"packetDeltaCountPerSec": 20,
"octetDeltaCountPerSec": 200,
"flowDurationMilliseconds": 2000
}
})
print(c.getItemsOutOfTTL())
'''
|
alexbredo/ipfix-receiver
|
base/cache.py
|
Python
|
bsd-2-clause
| 4,389
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'county_vmt.ui'
#
# Created: Thu Nov 21 11:10:25 2013
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(389, 320)
self.horizontalLayout = QtGui.QHBoxLayout(Form)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.tableView = QtGui.QTableView(Form)
self.tableView.setObjectName(_fromUtf8("tableView"))
self.horizontalLayout.addWidget(self.tableView)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
|
anl-tracc/polaris-analyser
|
county_vmt.py
|
Python
|
bsd-3-clause
| 1,297
|
import itertools
from django.conf import settings
from django.db import models
from django.utils import translation as translation_utils
from olympia.addons.query import IndexCompiler, IndexQuery
def order_by_translation(qs, fieldname):
"""
Order the QuerySet by the translated field, honoring the current and
fallback locales. Returns a new QuerySet.
The model being sorted needs a get_fallback() classmethod that describes
the fallback locale. get_fallback() can return a string or a Field.
"""
if fieldname.startswith('-'):
desc = True
fieldname = fieldname[1:]
else:
desc = False
qs = qs.all()
model = qs.model
field = model._meta.get_field(fieldname)
# connection is a tuple (lhs, table, join_cols)
connection = (model._meta.db_table, field.rel.to._meta.db_table,
field.rel.field_name)
# Doing the manual joins is flying under Django's radar, so we need to make
# sure the initial alias (the main table) is set up.
if not qs.query.tables:
qs.query.get_initial_alias()
# Force two new joins against the translation table, without reusing any
# aliases. We'll hook up the language fallbacks later.
    # Passing `reuse=set()` forces new joins, and passing `nullable=True`
# forces django to make LEFT OUTER JOINs (otherwise django, because we are
# building the query manually, does not detect that an inner join would
# remove results and happily simplifies the LEFT OUTER JOINs to
# INNER JOINs)
qs.query = qs.query.clone(TranslationQuery)
t1 = qs.query.join(connection, join_field=field, reuse=set(),
nullable=True)
t2 = qs.query.join(connection, join_field=field, reuse=set(),
nullable=True)
qs.query.translation_aliases = {field: (t1, t2)}
f1, f2 = '%s.`localized_string`' % t1, '%s.`localized_string`' % t2
name = 'translated_%s' % field.column
ifnull = 'IFNULL(%s, %s)' % (f1, f2)
prefix = '-' if desc else ''
return qs.extra(select={name: ifnull},
where=['(%s IS NOT NULL OR %s IS NOT NULL)' % (f1, f2)],
order_by=[prefix + name])
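# Hypothetical usage sketch (the model name is illustrative, not from this
# module): order_by_translation(Addon.objects.all(), 'name') returns a new
# queryset ordered by IFNULL(current-locale name, fallback-locale name);
# passing '-name' reverses the ordering.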
class TranslationQuery(IndexQuery):
"""
Overrides sql.Query to hit our special compiler that knows how to JOIN
translations.
"""
def clone(self, klass=None, **kwargs):
# Maintain translation_aliases across clones.
c = super(TranslationQuery, self).clone(klass, **kwargs)
c.translation_aliases = self.translation_aliases
return c
def get_compiler(self, using=None, connection=None):
# Call super to figure out using and connection.
c = super(TranslationQuery, self).get_compiler(using, connection)
return SQLCompiler(self, c.connection, c.using)
class SQLCompiler(IndexCompiler):
"""Overrides get_from_clause to LEFT JOIN translations with a locale."""
def get_from_clause(self):
# Temporarily remove translation tables from query.tables so Django
# doesn't create joins against them.
old_tables = list(self.query.tables)
for table in itertools.chain(*self.query.translation_aliases.values()):
self.query.tables.remove(table)
joins, params = super(SQLCompiler, self).get_from_clause()
# fallback could be a string locale or a model field.
params.append(translation_utils.get_language())
if hasattr(self.query.model, 'get_fallback'):
fallback = self.query.model.get_fallback()
else:
fallback = settings.LANGUAGE_CODE
if not isinstance(fallback, models.Field):
params.append(fallback)
# Add our locale-aware joins. We're not respecting the table ordering
# Django had in query.tables, but that seems to be ok.
for field, aliases in self.query.translation_aliases.items():
t1, t2 = aliases
joins.append(self.join_with_locale(t1))
joins.append(self.join_with_locale(t2, fallback))
self.query.tables = old_tables
return joins, params
def join_with_locale(self, alias, fallback=None):
# This is all lifted from the real sql.compiler.get_from_clause(),
# except for the extra AND clause. Fun project: fix Django to use Q
# objects here instead of a bunch of strings.
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
mapping = self.query.alias_map[alias]
# name, alias, join_type, lhs, lhs_col, col, nullable = mapping
name, alias, join_type, lhs, join_cols, _, join_field = mapping
lhs_col = join_field.column
rhs_col = join_cols
alias_str = '' if alias == name else (' %s' % alias)
if isinstance(fallback, models.Field):
fallback_str = '%s.%s' % (qn(self.query.model._meta.db_table),
qn(fallback.column))
else:
fallback_str = '%s'
return ('%s %s%s ON (%s.%s = %s.%s AND %s.%s = %s)' %
(join_type, qn(name), alias_str,
qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col),
qn(alias), qn('locale'), fallback_str))
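        # Shape of the generated join (identifiers are illustrative only):
        #   LEFT OUTER JOIN `translations` T3
        #     ON (`addons`.`name` = T3.`id` AND T3.`locale` = %s)
        # where %s is bound to the active language or the fallback locale.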
|
andymckay/addons-server
|
src/olympia/translations/query.py
|
Python
|
bsd-3-clause
| 5,271
|
from .iqr_session import IqrSession
from .iqr_controller import IqrController
__all__ = [
'IqrController',
'IqrSession',
]
|
Purg/SMQTK
|
python/smqtk/iqr/__init__.py
|
Python
|
bsd-3-clause
| 132
|
import pytz
from django.conf import settings
from django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.translation import gettext as _
# Wagtail languages with >=90% coverage
# This list is manually maintained
WAGTAILADMIN_PROVIDED_LANGUAGES = [
('ar', 'Arabic'),
('ca', 'Catalan'),
('cs', 'Czech'),
('de', 'German'),
('el', 'Greek'),
('en', 'English'),
('es', 'Spanish'),
('et', 'Estonian'),
('fi', 'Finnish'),
('fr', 'French'),
('gl', 'Galician'),
('hr', 'Croatian'),
('hu', 'Hungarian'),
('id-id', 'Indonesian'),
('is-is', 'Icelandic'),
('it', 'Italian'),
('ja', 'Japanese'),
('ko', 'Korean'),
('lt', 'Lithuanian'),
('mn', 'Mongolian'),
('nb', 'Norwegian Bokmål'),
('nl-nl', 'Netherlands Dutch'),
('fa', 'Persian'),
('pl', 'Polish'),
('pt-br', 'Brazilian Portuguese'),
('pt-pt', 'Portuguese'),
('ro', 'Romanian'),
('ru', 'Russian'),
('sv', 'Swedish'),
('sk-sk', 'Slovak'),
('th', 'Thai'),
('tr', 'Turkish'),
('uk', 'Ukrainian'),
('zh-hans', 'Chinese (Simplified)'),
('zh-hant', 'Chinese (Traditional)'),
]
# Translatable strings to be made available to JavaScript code
# as the wagtailConfig.STRINGS object
def get_js_translation_strings():
return {
'DELETE': _('Delete'),
'EDIT': _('Edit'),
'PAGE': _('Page'),
'PAGES': _('Pages'),
'LOADING': _('Loading…'),
'NO_RESULTS': _('No results'),
'SERVER_ERROR': _('Server Error'),
'SEE_ALL': _('See all'),
'CLOSE_EXPLORER': _('Close explorer'),
'ALT_TEXT': _('Alt text'),
'DECORATIVE_IMAGE': _('Decorative image'),
'WRITE_HERE': _('Write here…'),
'HORIZONTAL_LINE': _('Horizontal line'),
'LINE_BREAK': _('Line break'),
'UNDO': _('Undo'),
'REDO': _('Redo'),
'RELOAD_PAGE': _('Reload the page'),
'RELOAD_EDITOR': _('Reload saved content'),
'SHOW_LATEST_CONTENT': _('Show latest content'),
'SHOW_ERROR': _('Show error'),
'EDITOR_CRASH': _('The editor just crashed. Content has been reset to the last saved version.'),
'BROKEN_LINK': _('Broken link'),
'MISSING_DOCUMENT': _('Missing document'),
'CLOSE': _('Close'),
'EDIT_PAGE': _('Edit \'{title}\''),
'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \'{title}\''),
'PAGE_EXPLORER': _('Page explorer'),
'SAVE': _('Save'),
'SAVING': _('Saving...'),
'CANCEL': _('Cancel'),
'DELETING': _('Deleting...'),
'ADD_A_COMMENT': _('Add a comment'),
'SHOW_COMMENTS': _('Show comments'),
'REPLY': _('Reply'),
'RESOLVE': _('Resolve'),
'RETRY': _('Retry'),
'DELETE_ERROR': _('Delete error'),
'CONFIRM_DELETE_COMMENT': _('Are you sure?'),
'SAVE_ERROR': _('Save error'),
'SAVE_COMMENT_WARNING': _('This will be saved when the page is saved'),
'FOCUS_COMMENT': _('Focus comment'),
'UNFOCUS_COMMENT': _('Unfocus comment'),
'COMMENT': _('Comment'),
'MORE_ACTIONS': _('More actions'),
'SAVE_PAGE_TO_ADD_COMMENT': _('Save the page to add this comment'),
'SAVE_PAGE_TO_SAVE_COMMENT_CHANGES': _('Save the page to save this comment'),
'SAVE_PAGE_TO_SAVE_REPLY': _('Save the page to save this reply'),
'DASHBOARD': _('Dashboard'),
'EDIT_YOUR_ACCOUNT': _('Edit your account'),
'SEARCH': _('Search'),
'MONTHS': [str(m) for m in MONTHS.values()],
# Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start
# counting from -1 and use modulo 7 to get an array index
'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],
'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],
}
def get_available_admin_languages():
return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)
def get_available_admin_time_zones():
if not settings.USE_TZ:
return []
return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)
|
gasman/wagtail
|
wagtail/admin/localization.py
|
Python
|
bsd-3-clause
| 4,210
|
class NamespaceAlreadyRegistered(Exception):
pass
class NoParentFound(Exception):
pass
|
rsalmaso/django-cms
|
menus/exceptions.py
|
Python
|
bsd-3-clause
| 97
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Histogram class which lets you build your histograms just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from ...models import Range1d
from ...properties import Bool, Int
from .._builder import create_and_build
from .bar_builder import BarBuilder
from ..glyphs import HistogramGlyph
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(data, values=None, label=None, color=None, agg="count",
bins=None, yscale="linear", xgrid=False, ygrid=True,
continuous_range=None, **kw):
if continuous_range and not isinstance(continuous_range, Range1d):
raise ValueError(
"continuous_range must be an instance of bokeh.models.ranges.Range1d"
)
# The continuous_range is the y_range (until we implement HBar charts)
y_range = continuous_range
kw['label'] = label
kw['values'] = values
kw['color'] = color
kw['agg'] = agg
kw['yscale'] = yscale
kw['xgrid'] = xgrid
kw['ygrid'] = ygrid
kw['y_range'] = y_range
kw['bins'] = bins
return create_and_build(HistogramBuilder, data, **kw)
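# Illustrative usage sketch (the DataFrame and column names are assumed, not
# taken from this module):
#   from bokeh.charts import Histogram
#   hist = Histogram(df, values='mpg', color='cyl', bins=20)
# This delegates to create_and_build() above, which drives HistogramBuilder.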
class HistogramBuilder(BarBuilder):
"""Generates one to many histograms with unique attributes.
The HistogramBuilder is responsible for producing a chart
containing one to many histograms from table-like inputs.
"""
bins = Int(default=None, help="""
    Number of bins to use for the histogram. (default: None,
    which uses the Freedman-Diaconis rule)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
glyph = HistogramGlyph
def _setup(self):
super(HistogramBuilder, self)._setup()
if self.attributes['color'].columns is not None:
self.fill_alpha = 0.6
def get_extra_args(self):
return dict(bin_count=self.bins)
def _set_ranges(self):
"""Push the Bar data into the ColumnDataSource and calculate
the proper ranges.
"""
x_max = max([comp_glyph.x_max for comp_glyph in self.comp_glyphs])
x_min = min([comp_glyph.x_min for comp_glyph in self.comp_glyphs])
y_max = max([comp_glyph.y_max for comp_glyph in self.comp_glyphs])
y_min = min([comp_glyph.y_min for comp_glyph in self.comp_glyphs])
x_buffer = ((x_max + x_min)/2.0)*0.1
self.x_range = Range1d(start=x_min - x_buffer, end=x_max + x_buffer)
self.y_range = Range1d(start=y_min, end=y_max * 1.1)
|
srinathv/bokeh
|
bokeh/charts/builder/histogram_builder.py
|
Python
|
bsd-3-clause
| 3,598
|
#!/usr/bin/env python
import sys
sys.path.extend(['.', '..'])
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
[
'test_c_lexer',
'test_c_ast',
'test_general',
'test_c_parser',
]
)
unittest.TextTestRunner(verbosity=1).run(suite)
|
kk1987/pycparser
|
tests/all_tests.py
|
Python
|
bsd-3-clause
| 294
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
from future import standard_library
standard_library.install_aliases()
from builtins import *
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
copy_to_usercache()
move_to_long_term()
def copy_to_usercache():
# Step 1: Copy data back to user cache
error_it = edb.get_timeseries_error_db().find()
uc = edb.get_usercache_db()
te = edb.get_timeseries_error_db()
logging.info("Found %d errors in this round" % edb.get_timeseries_error_db.estimate_document_count())
for error in error_it:
logging.debug("Copying entry %s" % error["metadata"])
save_result = uc.save(error)
remove_result = te.remove(error["_id"])
logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
logging.info("step copy_to_usercache DONE")
def move_to_long_term():
cache_uuid_list = enua.UserCache.get_uuid_list()
logging.info("cache UUID list = %s" % cache_uuid_list)
for uuid in cache_uuid_list:
logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
uh.moveToLongTerm()
if __name__ == '__main__':
fix_usercache_errors()
|
shankari/e-mission-server
|
bin/debug/fix_usercache_processing.py
|
Python
|
bsd-3-clause
| 1,917
|